Repository: ajslater/codex Branch: main Commit: ec9e22f4e447 Files: 768 Total size: 2.0 MB Directory structure: gitextract_jcsuoc91/ ├── .circleci/ │ └── config.yml ├── .dockerignore ├── .github/ │ └── workflows/ │ └── ci.yml ├── .gitignore ├── .picopt_treestamps.yaml ├── .prettierignore ├── .readthedocs.yaml ├── .shellcheckrc ├── CLAUDE.md ├── Dockerfile ├── LICENSE ├── Makefile ├── NEWS.md ├── README.md ├── bin/ │ ├── benchmark-opds.sh │ ├── build-choices.sh │ ├── build-dist.sh │ ├── ci-download-dist-if-identical.sh │ ├── clean-pycache.sh │ ├── collectstatic.sh │ ├── create-output-dirs.sh │ ├── delete-files.sh │ ├── dev-docker.sh │ ├── dev-module.sh │ ├── dev-prod-server.sh │ ├── dev-reverse-proxy.sh │ ├── dev-server.sh │ ├── dev-ttabs.sh │ ├── docker-compose-exit.sh │ ├── docker-tag-latest.sh │ ├── fix-docker.sh │ ├── fix-python.sh │ ├── fix.sh │ ├── icons_transform.py │ ├── kill-codex.sh │ ├── kill-eslint_d.sh │ ├── lint-ci.sh │ ├── lint-complexity.sh │ ├── lint-darwin.sh │ ├── lint-docker.sh │ ├── lint-python.sh │ ├── lint.sh │ ├── localize-db.sh │ ├── localize_library.sql │ ├── manage.py │ ├── pm │ ├── prettier-nginx.sh │ ├── roman.py │ ├── sort-ignore.sh │ ├── test-python.sh │ ├── uml.sh │ ├── update-deps-node.sh │ ├── update-deps-python.sh │ ├── vendor-diff-package.sh │ ├── vendor-patch-imports.sh │ ├── version-node.sh │ └── version-python.sh ├── cfg/ │ ├── ci.mk │ ├── codex.mk │ ├── common.mk │ ├── django.mk │ ├── docker.mk │ ├── docs.mk │ ├── eslint.config.base.js │ ├── frontend.mk │ ├── help.mk │ ├── node.mk │ ├── node_root.mk │ └── python.mk ├── ci/ │ ├── Dockerfile │ ├── base.Dockerfile │ ├── builder-base.Dockerfile │ ├── circleci-step-halt.sh │ ├── cleanup-repo.py │ ├── debian.sources │ ├── dev.Dockerfile │ ├── dist-builder.Dockerfile │ ├── docker-bake.hcl │ ├── docker-build-image.sh │ ├── docker-compose-exit.sh │ ├── docker-init.sh │ ├── docker-push.sh │ ├── docker-tag-remote-version-as-latest.sh │ ├── machine-arch.sh │ ├── machine-env.sh │ ├── machine-init.sh │ ├── machine-packages.sh │ ├── package.Dockerfile │ ├── python-publish.sh │ ├── version-checksum.sh │ ├── version-codex-base.sh │ ├── version-codex-builder-base.sh │ ├── version-codex-dist-builder.sh │ ├── versions-create-env.sh │ └── versions-env-filename.sh ├── codex/ │ ├── __init__.py │ ├── applications/ │ │ ├── __init__.py │ │ ├── lifespan.py │ │ └── websocket.py │ ├── asgi.py │ ├── authentication.py │ ├── choices/ │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── browser.py │ │ ├── choices_to_json.py │ │ ├── jobs.py │ │ ├── notifications.py │ │ ├── reader.py │ │ ├── search.py │ │ └── statii.py │ ├── librarian/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── bookmark/ │ │ │ ├── __init__.py │ │ │ ├── bookmarkd.py │ │ │ ├── latest_version.py │ │ │ ├── tasks.py │ │ │ ├── update.py │ │ │ └── user_active.py │ │ ├── covers/ │ │ │ ├── __init__.py │ │ │ ├── coverd.py │ │ │ ├── create.py │ │ │ ├── path.py │ │ │ ├── purge.py │ │ │ ├── status.py │ │ │ └── tasks.py │ │ ├── cron/ │ │ │ ├── __init__.py │ │ │ └── crond.py │ │ ├── fs/ │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── event_batcherd.py │ │ │ ├── events.py │ │ │ ├── filters.py │ │ │ ├── poller/ │ │ │ │ ├── __init__.py │ │ │ │ ├── events.py │ │ │ │ ├── poller.py │ │ │ │ ├── snapshot.py │ │ │ │ ├── snapshot_diff.py │ │ │ │ ├── status.py │ │ │ │ └── tasks.py │ │ │ ├── status.py │ │ │ ├── tasks.py │ │ │ └── watcher/ │ │ │ ├── __init__.py │ │ │ ├── data.py │ │ │ ├── dirs.py │ │ │ ├── events.py │ │ │ ├── move.py │ │ │ ├── status.py │ │ │ ├── tasks.py │ │ │ └── watcher.py │ │ 
├── librariand.py │ │ ├── memory.py │ │ ├── mp_queue.py │ │ ├── notifier/ │ │ │ ├── __init__.py │ │ │ ├── notifierd.py │ │ │ └── tasks.py │ │ ├── restarter/ │ │ │ ├── __init__.py │ │ │ ├── restarter.py │ │ │ ├── status.py │ │ │ └── tasks.py │ │ ├── scribe/ │ │ │ ├── __init__.py │ │ │ ├── importer/ │ │ │ │ ├── __init__.py │ │ │ │ ├── const.py │ │ │ │ ├── create/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── comics.py │ │ │ │ │ ├── const.py │ │ │ │ │ ├── covers.py │ │ │ │ │ ├── folders.py │ │ │ │ │ ├── foreign_keys.py │ │ │ │ │ └── link_fks.py │ │ │ │ ├── delete/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── comics.py │ │ │ │ │ ├── covers.py │ │ │ │ │ └── folders.py │ │ │ │ ├── failed/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── create.py │ │ │ │ │ ├── failed.py │ │ │ │ │ └── query.py │ │ │ │ ├── finish.py │ │ │ │ ├── importer.py │ │ │ │ ├── init.py │ │ │ │ ├── link/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── const.py │ │ │ │ │ ├── covers.py │ │ │ │ │ ├── delete.py │ │ │ │ │ ├── many_to_many.py │ │ │ │ │ ├── prepare.py │ │ │ │ │ └── sum.py │ │ │ │ ├── moved/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── comics.py │ │ │ │ │ ├── covers.py │ │ │ │ │ └── folders.py │ │ │ │ ├── query/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── covers.py │ │ │ │ │ ├── filters.py │ │ │ │ │ ├── foreign_keys.py │ │ │ │ │ ├── links.py │ │ │ │ │ ├── links_fk.py │ │ │ │ │ ├── links_m2m.py │ │ │ │ │ ├── update_comics.py │ │ │ │ │ └── update_fks.py │ │ │ │ ├── read/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── aggregate_path.py │ │ │ │ │ ├── const.py │ │ │ │ │ ├── extract.py │ │ │ │ │ ├── folders.py │ │ │ │ │ ├── foreign_keys.py │ │ │ │ │ └── many_to_many.py │ │ │ │ ├── search/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── prepare.py │ │ │ │ │ ├── sync_m2m.py │ │ │ │ │ └── update.py │ │ │ │ ├── statii/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── create.py │ │ │ │ │ ├── delete.py │ │ │ │ │ ├── failed.py │ │ │ │ │ ├── link.py │ │ │ │ │ ├── moved.py │ │ │ │ │ ├── query.py │ │ │ │ │ ├── read.py │ │ │ │ │ └── search.py │ │ │ │ ├── status.py │ │ │ │ └── tasks.py │ │ │ ├── janitor/ │ │ │ │ ├── __init__.py │ │ │ │ ├── adopt_folders.py │ │ │ │ ├── cleanup.py │ │ │ │ ├── failed_imports.py │ │ │ │ ├── integrity/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ └── foreign_keys.py │ │ │ │ ├── janitor.py │ │ │ │ ├── scheduled_time.py │ │ │ │ ├── status.py │ │ │ │ ├── tasks.py │ │ │ │ ├── update.py │ │ │ │ └── vacuum.py │ │ │ ├── lazy_importer.py │ │ │ ├── priority.py │ │ │ ├── scribed.py │ │ │ ├── search/ │ │ │ │ ├── __init__.py │ │ │ │ ├── const.py │ │ │ │ ├── handler.py │ │ │ │ ├── optimize.py │ │ │ │ ├── prepare.py │ │ │ │ ├── remove.py │ │ │ │ ├── status.py │ │ │ │ ├── sync.py │ │ │ │ └── tasks.py │ │ │ ├── status.py │ │ │ ├── tasks.py │ │ │ └── timestamp_update.py │ │ ├── status.py │ │ ├── status_controller.py │ │ ├── tasks.py │ │ ├── telemeter/ │ │ │ ├── __init__.py │ │ │ ├── scheduled_time.py │ │ │ ├── stats.py │ │ │ ├── tasks.py │ │ │ └── telemeter.py │ │ ├── threads.py │ │ └── worker.py │ ├── middleware.py │ ├── migrations/ │ │ ├── 0001_init.py │ │ ├── 0002_auto_20200826_0622.py │ │ ├── 0003_auto_20200831_2033.py │ │ ├── 0004_failedimport.py │ │ ├── 0005_auto_20200918_0146.py │ │ ├── 0006_update_default_names_and_remove_duplicate_comics.py │ │ ├── 0007_auto_20211210_1710.py │ │ ├── 0008_alter_comic_created_at_alter_comic_format_and_more.py │ │ ├── 0009_alter_comic_parent_folder.py │ │ ├── 0010_haystack.py │ │ ├── 0011_library_groups_and_metadata_changes.py │ │ ├── 0012_rename_description_comic_comments.py │ │ ├── 0013_int_issue_count_longer_charfields.py │ │ ├── 
0014_pdf_issue_suffix_remove_cover_image_sort_name.py │ │ ├── 0015_link_comics_to_top_level_folders.py │ │ ├── 0016_remove_comic_cover_path_librarianstatus.py │ │ ├── 0017_alter_timestamp_options_alter_adminflag_name_and_more.py │ │ ├── 0018_rename_userbookmark_bookmark.py │ │ ├── 0019_delete_queuejob.py │ │ ├── 0020_remove_search_tables.py │ │ ├── 0021_bookmark_fit_to_choices_read_in_reverse.py │ │ ├── 0022_bookmark_vertical_useractive_null_statuses.py │ │ ├── 0023_rename_credit_creator_and_more.py │ │ ├── 0024_comic_gtin_comic_story_arc_number.py │ │ ├── 0025_add_story_arc_number.py │ │ ├── 0026_comicbox_1.py │ │ ├── 0027_import_order_and_covers.py │ │ ├── 0028_telemeter.py │ │ ├── 0029_comicfts.py │ │ ├── 0030_nocase_collation_day_month_indexes_status_types.py │ │ ├── 0031_adminflag_banner.py │ │ ├── 0032_alter_librarianstatus_preactive.py │ │ ├── 0033_alter_librarianstatus_status_type.py │ │ ├── 0034_comicbox2.py │ │ ├── 0035_fts_optmize.py │ │ ├── 0036_alter_comic_path_alter_customcover_path_and_more.py │ │ ├── 0037_redefine_reading_direction_filetype_choices.py │ │ ├── 0038_settings_tables.py │ │ └── __init__.py │ ├── models/ │ │ ├── __init__.py │ │ ├── admin.py │ │ ├── base.py │ │ ├── bookmark.py │ │ ├── choices.py │ │ ├── comic.py │ │ ├── fields.py │ │ ├── functions.py │ │ ├── groups.py │ │ ├── identifier.py │ │ ├── library.py │ │ ├── named.py │ │ ├── paths.py │ │ ├── query.py │ │ ├── settings.py │ │ └── util.py │ ├── run.py │ ├── serializers/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── admin/ │ │ │ ├── __init__.py │ │ │ ├── flags.py │ │ │ ├── groups.py │ │ │ ├── libraries.py │ │ │ ├── stats.py │ │ │ ├── tasks.py │ │ │ └── users.py │ │ ├── auth.py │ │ ├── browser/ │ │ │ ├── __init__.py │ │ │ ├── choices.py │ │ │ ├── filters.py │ │ │ ├── metadata.py │ │ │ ├── mixins.py │ │ │ ├── mtime.py │ │ │ ├── page.py │ │ │ ├── saved.py │ │ │ └── settings.py │ │ ├── fields/ │ │ │ ├── __init__.py │ │ │ ├── auth.py │ │ │ ├── base.py │ │ │ ├── browser.py │ │ │ ├── group.py │ │ │ ├── reader.py │ │ │ ├── sanitized.py │ │ │ ├── settings.py │ │ │ ├── stats.py │ │ │ └── vuetify.py │ │ ├── homepage.py │ │ ├── mixins.py │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── admin.py │ │ │ ├── base.py │ │ │ ├── bookmark.py │ │ │ ├── comic.py │ │ │ ├── groups.py │ │ │ ├── named.py │ │ │ └── pycountry.py │ │ ├── opds/ │ │ │ ├── __init__.py │ │ │ ├── authentication.py │ │ │ ├── urls.py │ │ │ ├── v1.py │ │ │ └── v2/ │ │ │ ├── __init__.py │ │ │ ├── facet.py │ │ │ ├── feed.py │ │ │ ├── links.py │ │ │ ├── metadata.py │ │ │ ├── progression.py │ │ │ ├── publication.py │ │ │ └── unused.py │ │ ├── reader.py │ │ ├── redirect.py │ │ ├── route.py │ │ ├── settings.py │ │ └── versions.py │ ├── settings/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── codex.toml.default │ │ ├── config.py │ │ ├── hypercorn_migrate.py │ │ ├── logging.py │ │ ├── secret_key.py │ │ ├── servestatic.py │ │ └── timezone.py │ ├── signals/ │ │ ├── __init__.py │ │ ├── django_signals.py │ │ └── os_signals.py │ ├── startup/ │ │ ├── __init__.py │ │ ├── custom_cover_libraries.py │ │ ├── db.py │ │ ├── loguru.py │ │ └── registration.py │ ├── static_src/ │ │ ├── img/ │ │ │ └── .picopt_treestamps.yaml │ │ ├── pwa/ │ │ │ └── offline.html │ │ └── robots.txt │ ├── templates/ │ │ ├── README.md │ │ ├── headers-icons.html │ │ ├── headers-script-globals.html │ │ ├── index.html │ │ ├── opds_v1/ │ │ │ ├── index.xml │ │ │ └── opensearch_v1.xml │ │ └── pwa/ │ │ ├── headers.html │ │ ├── manifest.webmanifest │ │ ├── serviceworker-register.js │ │ └── serviceworker.js │ ├── urls/ 
│ │ ├── __init__.py │ │ ├── api/ │ │ │ ├── __init__.py │ │ │ ├── admin.py │ │ │ ├── auth.py │ │ │ ├── browser.py │ │ │ ├── reader.py │ │ │ ├── root.py │ │ │ └── v3.py │ │ ├── app.py │ │ ├── const.py │ │ ├── converters.py │ │ ├── opds/ │ │ │ ├── __init__.py │ │ │ ├── authentication.py │ │ │ ├── binary.py │ │ │ ├── root.py │ │ │ ├── v1.py │ │ │ └── v2.py │ │ ├── pwa.py │ │ ├── root.py │ │ └── spectacular.py │ ├── util.py │ ├── version.py │ ├── views/ │ │ ├── README.md │ │ ├── __init__.py │ │ ├── admin/ │ │ │ ├── __init__.py │ │ │ ├── api_key.py │ │ │ ├── auth.py │ │ │ ├── flag.py │ │ │ ├── group.py │ │ │ ├── library.py │ │ │ ├── permissions.py │ │ │ ├── stats.py │ │ │ ├── tasks.py │ │ │ └── user.py │ │ ├── auth.py │ │ ├── bookmark.py │ │ ├── browser/ │ │ │ ├── __init__.py │ │ │ ├── annotate/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bookmark.py │ │ │ │ ├── card.py │ │ │ │ └── order.py │ │ │ ├── bookmark.py │ │ │ ├── breadcrumbs.py │ │ │ ├── browser.py │ │ │ ├── choices.py │ │ │ ├── const.py │ │ │ ├── cover.py │ │ │ ├── download.py │ │ │ ├── filters/ │ │ │ │ ├── __init__.py │ │ │ │ ├── bookmark.py │ │ │ │ ├── field.py │ │ │ │ ├── filter.py │ │ │ │ ├── group.py │ │ │ │ └── search/ │ │ │ │ ├── __init__.py │ │ │ │ ├── field/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── column.py │ │ │ │ │ ├── expression.py │ │ │ │ │ ├── filter.py │ │ │ │ │ ├── optimize.py │ │ │ │ │ └── parse.py │ │ │ │ ├── fts.py │ │ │ │ └── parse.py │ │ │ ├── group_mtime.py │ │ │ ├── metadata/ │ │ │ │ ├── __init__.py │ │ │ │ ├── annotate.py │ │ │ │ ├── const.py │ │ │ │ ├── copy_intersections.py │ │ │ │ └── query_intersections.py │ │ │ ├── mtime.py │ │ │ ├── order_by.py │ │ │ ├── page_in_bounds.py │ │ │ ├── paginate.py │ │ │ ├── params.py │ │ │ ├── saved_settings.py │ │ │ ├── settings.py │ │ │ ├── title.py │ │ │ └── validate.py │ │ ├── const.py │ │ ├── download.py │ │ ├── error.py │ │ ├── exceptions.py │ │ ├── frontend.py │ │ ├── healthcheck.py │ │ ├── lazy_import.py │ │ ├── mixins.py │ │ ├── opds/ │ │ │ ├── __init__.py │ │ │ ├── auth.py │ │ │ ├── authentication/ │ │ │ │ ├── __init__.py │ │ │ │ └── v1.py │ │ │ ├── binary.py │ │ │ ├── const.py │ │ │ ├── error.py │ │ │ ├── feed.py │ │ │ ├── metadata.py │ │ │ ├── opensearch/ │ │ │ │ ├── __init__.py │ │ │ │ └── v1.py │ │ │ ├── settings.py │ │ │ ├── start.py │ │ │ ├── urls.py │ │ │ ├── user_agent.py │ │ │ ├── v1/ │ │ │ │ ├── __init__.py │ │ │ │ ├── const.py │ │ │ │ ├── entry/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── entry.py │ │ │ │ │ └── links.py │ │ │ │ ├── facets.py │ │ │ │ ├── feed.py │ │ │ │ └── links.py │ │ │ └── v2/ │ │ │ ├── __init__.py │ │ │ ├── const.py │ │ │ ├── feed/ │ │ │ │ ├── __init__.py │ │ │ │ ├── feed_links.py │ │ │ │ ├── groups.py │ │ │ │ ├── links.py │ │ │ │ └── publications.py │ │ │ ├── href.py │ │ │ ├── manifest.py │ │ │ └── progression.py │ │ ├── public.py │ │ ├── pwa.py │ │ ├── reader/ │ │ │ ├── __init__.py │ │ │ ├── arcs.py │ │ │ ├── books.py │ │ │ ├── page.py │ │ │ ├── params.py │ │ │ ├── reader.py │ │ │ └── settings.py │ │ ├── settings.py │ │ ├── template.py │ │ ├── timezone.py │ │ ├── util.py │ │ └── version.py │ └── websockets/ │ ├── README.md │ ├── __init__.py │ ├── consumers.py │ ├── listener.py │ └── mp_queue.py ├── compose.yaml ├── docs/ │ ├── DOCKER.md │ ├── WINDOWS.md │ ├── requirements.txt │ ├── style.material.css │ ├── style.mkdocs.css │ └── style.readthedocs.css ├── eslint.config.js ├── frontend/ │ ├── .gitignore │ ├── .npmrc │ ├── .prettierignore │ ├── .remarkignore │ ├── .shellcheckrc │ ├── Makefile │ ├── README.md │ ├── bin/ │ │ ├── dev-server.sh │ │ ├── 
fix.sh │ │ ├── kill-eslint_d.sh │ │ ├── lint-darwin.sh │ │ ├── lint.sh │ │ ├── roman.py │ │ ├── sort-ignore.sh │ │ ├── update-deps-node.sh │ │ └── version-node.sh │ ├── cfg/ │ │ ├── codex-frontend.mk │ │ ├── common.mk │ │ ├── help.mk │ │ └── node.mk │ ├── jsconfig.json │ ├── package.json │ ├── src/ │ │ ├── admin.vue │ │ ├── api/ │ │ │ └── v3/ │ │ │ ├── admin.js │ │ │ ├── auth.js │ │ │ ├── base.js │ │ │ ├── browser.js │ │ │ ├── common.js │ │ │ ├── notify.js │ │ │ ├── reader.js │ │ │ └── vuetify-items.js │ │ ├── app.vue │ │ ├── browser.vue │ │ ├── comic-name.js │ │ ├── components/ │ │ │ ├── admin/ │ │ │ │ ├── admin-header.vue │ │ │ │ ├── browser-link.vue │ │ │ │ ├── create-update-dialog/ │ │ │ │ │ ├── create-update-button.vue │ │ │ │ │ ├── create-update-dialog.vue │ │ │ │ │ ├── create-update-inputs-mixin.js │ │ │ │ │ ├── duration-input.vue │ │ │ │ │ ├── group-create-update-inputs.vue │ │ │ │ │ ├── library-create-update-inputs.vue │ │ │ │ │ ├── relation-picker.vue │ │ │ │ │ ├── server-folder-picker.vue │ │ │ │ │ └── user-create-update-inputs.vue │ │ │ │ ├── drawer/ │ │ │ │ │ ├── admin-menu.vue │ │ │ │ │ ├── admin-settings-button-progress.vue │ │ │ │ │ ├── admin-settings-drawer.vue │ │ │ │ │ ├── admin-settings-panel.vue │ │ │ │ │ ├── status-list-item.vue │ │ │ │ │ └── status-list.vue │ │ │ │ ├── group-chip.vue │ │ │ │ ├── status-helpers.js │ │ │ │ ├── tabs/ │ │ │ │ │ ├── admin-table.vue │ │ │ │ │ ├── custom-covers-panel.vue │ │ │ │ │ ├── datetime-column.vue │ │ │ │ │ ├── delete-row-dialog.vue │ │ │ │ │ ├── failed-imports-panel.vue │ │ │ │ │ ├── flag-descriptions.json │ │ │ │ │ ├── flag-tab.vue │ │ │ │ │ ├── group-tab.vue │ │ │ │ │ ├── job-tab.vue │ │ │ │ │ ├── library-tab.vue │ │ │ │ │ ├── library-table.vue │ │ │ │ │ ├── relation-chips.vue │ │ │ │ │ ├── stats-tab.vue │ │ │ │ │ ├── stats-table.vue │ │ │ │ │ ├── tabs.vue │ │ │ │ │ └── user-tab.vue │ │ │ │ └── use-now-timer.js │ │ │ ├── anchors.scss │ │ │ ├── auth/ │ │ │ │ ├── auth-form-mixin.js │ │ │ │ ├── auth-menu.vue │ │ │ │ ├── auth-token.vue │ │ │ │ ├── change-password-dialog.vue │ │ │ │ └── login-dialog.vue │ │ │ ├── banner.vue │ │ │ ├── book-cover.scss │ │ │ ├── book-cover.vue │ │ │ ├── browser/ │ │ │ │ ├── browser-header.vue │ │ │ │ ├── card/ │ │ │ │ │ ├── browser-card-menu.vue │ │ │ │ │ ├── card.vue │ │ │ │ │ ├── controls.vue │ │ │ │ │ ├── order-by-caption.vue │ │ │ │ │ └── subtitle.vue │ │ │ │ ├── drawer/ │ │ │ │ │ ├── browser-settings-covers.vue │ │ │ │ │ ├── browser-settings-drawer.vue │ │ │ │ │ ├── browser-settings-group.vue │ │ │ │ │ ├── browser-settings-misc.vue │ │ │ │ │ ├── browser-settings-panel.vue │ │ │ │ │ └── browser-settings-saved.vue │ │ │ │ ├── empty.vue │ │ │ │ ├── filter-warning-snackbar.vue │ │ │ │ ├── main.vue │ │ │ │ └── toolbars/ │ │ │ │ ├── breadcrumbs/ │ │ │ │ │ ├── breadcrumbs.vue │ │ │ │ │ └── browser-toolbar-breadcrumbs.vue │ │ │ │ ├── browser-toolbar-title.vue │ │ │ │ ├── nav/ │ │ │ │ │ ├── browser-nav-button.vue │ │ │ │ │ └── browser-toolbar-nav.vue │ │ │ │ ├── search/ │ │ │ │ │ ├── browser-toolbar-search.vue │ │ │ │ │ ├── search-combobox.vue │ │ │ │ │ ├── search-help-text.vue │ │ │ │ │ └── search-help.vue │ │ │ │ ├── select-many/ │ │ │ │ │ └── browser-toolbar-select-many.vue │ │ │ │ └── top/ │ │ │ │ ├── browser-toolbar-top.vue │ │ │ │ ├── filter-by-select.vue │ │ │ │ ├── filter-sub-menu.vue │ │ │ │ ├── order-by-select.vue │ │ │ │ ├── order-reverse-button.vue │ │ │ │ ├── search-button.vue │ │ │ │ ├── toolbar-button.vue │ │ │ │ └── top-group-select.vue │ │ │ ├── cancel-button.vue │ │ │ ├── clipboard.vue │ │ │ 
├── close-button.vue │ │ │ ├── codex-list-item.vue │ │ │ ├── confirm-dialog.vue │ │ │ ├── confirm-footer.vue │ │ │ ├── download-button.vue │ │ │ ├── empty.vue │ │ │ ├── mark-read-button.vue │ │ │ ├── metadata/ │ │ │ │ ├── expand-button.vue │ │ │ │ ├── metadata-activator.vue │ │ │ │ ├── metadata-body.vue │ │ │ │ ├── metadata-chip.vue │ │ │ │ ├── metadata-controls.vue │ │ │ │ ├── metadata-cover.vue │ │ │ │ ├── metadata-dialog.vue │ │ │ │ ├── metadata-header.vue │ │ │ │ ├── metadata-ratings.vue │ │ │ │ ├── metadata-tags.vue │ │ │ │ ├── metadata-text.vue │ │ │ │ ├── table.scss │ │ │ │ └── tags-table.vue │ │ │ ├── pagination-nav-button.vue │ │ │ ├── pagination-slider.vue │ │ │ ├── pagination-toolbar.vue │ │ │ ├── placeholder-loading.vue │ │ │ ├── reader/ │ │ │ │ ├── book-change-activator.vue │ │ │ │ ├── book-change-drawer.vue │ │ │ │ ├── books-window.vue │ │ │ │ ├── change-column.scss │ │ │ │ ├── drawer/ │ │ │ │ │ ├── download-panel.vue │ │ │ │ │ ├── keyboard-shortcuts-panel.vue │ │ │ │ │ ├── keyboard-shortcuts-table.vue │ │ │ │ │ ├── reader-settings-controls.vue │ │ │ │ │ ├── reader-settings-drawer.vue │ │ │ │ │ ├── reader-settings-panel.vue │ │ │ │ │ ├── reader-settings-reader.vue │ │ │ │ │ └── reader-settings-scope.vue │ │ │ │ ├── empty.vue │ │ │ │ ├── pager/ │ │ │ │ │ ├── horizontal-pages.vue │ │ │ │ │ ├── page/ │ │ │ │ │ │ ├── page-error.vue │ │ │ │ │ │ ├── page-img.vue │ │ │ │ │ │ ├── page-loading.vue │ │ │ │ │ │ └── page.vue │ │ │ │ │ ├── page-change-link.vue │ │ │ │ │ ├── pager-full-pdf.vue │ │ │ │ │ ├── pager-horizontal.vue │ │ │ │ │ ├── pager-vertical.vue │ │ │ │ │ ├── pager.vue │ │ │ │ │ ├── pdf-doc.vue │ │ │ │ │ └── scale-for-scroll.vue │ │ │ │ └── toolbars/ │ │ │ │ ├── nav/ │ │ │ │ │ ├── reader-book-change-nav-button.vue │ │ │ │ │ ├── reader-nav-button.vue │ │ │ │ │ └── reader-toolbar-nav.vue │ │ │ │ └── top/ │ │ │ │ ├── reader-arc-select.vue │ │ │ │ └── reader-toolbar-top.vue │ │ │ ├── scale-button.vue │ │ │ ├── settings/ │ │ │ │ ├── button.vue │ │ │ │ ├── docs-footer.vue │ │ │ │ ├── opds-dialog.vue │ │ │ │ ├── opds-url.vue │ │ │ │ ├── settings-drawer.vue │ │ │ │ └── version-footer.vue │ │ │ ├── submit-footer.vue │ │ │ ├── toolbar-select.vue │ │ │ └── unauthorized.vue │ │ ├── datetime.js │ │ ├── http-error.vue │ │ ├── main.js │ │ ├── platform.js │ │ ├── plugins/ │ │ │ ├── drag-scroll.js │ │ │ ├── router.js │ │ │ └── vuetify.js │ │ ├── reader.vue │ │ ├── route.js │ │ ├── stores/ │ │ │ ├── admin.js │ │ │ ├── auth.js │ │ │ ├── browser-select-many.js │ │ │ ├── browser.js │ │ │ ├── common.js │ │ │ ├── metadata.js │ │ │ ├── reader.js │ │ │ ├── socket.js │ │ │ └── store.js │ │ └── util.js │ ├── tests/ │ │ └── unit/ │ │ └── reader-nav-button.test.js │ └── vite.config.js ├── mkdocs.yml ├── mock_comics/ │ ├── __init__.py │ ├── bigbook.py │ ├── mock_comics.py │ └── mock_comics.sh ├── nginx/ │ └── default.conf ├── package.json ├── pyproject.toml ├── tests/ │ ├── README.md │ ├── __init__.py │ ├── files/ │ │ ├── comicbox-2-example.cbz │ │ ├── comicbox-2-update.cbz │ │ ├── comicbox.example.yaml │ │ └── comicbox.update.yaml │ ├── importer/ │ │ ├── __init__.py │ │ ├── test_basic.py │ │ ├── test_update_all.py │ │ └── test_update_none.py │ ├── nginx-local-codex.conf │ ├── test_asgi.py │ └── test_models.py └── vulture_ignorelist.py ================================================ FILE CONTENTS ================================================ ================================================ FILE: .circleci/config.yml ================================================ executors: amd64-medium-executor: 
    machine:
      image: ubuntu-2404:current
    resource_class: medium
  arm64-medium-executor:
    machine:
      image: ubuntu-2404:current
    resource_class: arm.medium
orbs:
  discord: antonioned/discord@0.1.0
  advanced-checkout: vsco/advanced-checkout@1.1.0
jobs:
  build-base-amd64: &build-base
    executor: amd64-medium-executor
    steps:
      - advanced-checkout/shallow-checkout
      - run:
          command: . ./ci/machine-init.sh base
          name: Update packages & docker, produce env file.
      - run:
          command: ./ci/docker-build-image.sh codex-base clean
          name: Build Base Image
      - run:
          command: ./ci/docker-build-image.sh codex-builder-base clean
          name: Build Builder Base Image
      - persist_to_workspace:
          paths:
            - ./.env-*
            - ./.venv
          root: .
      - discord/status:
          fail_only: true
          webhook: "${DISCORD_STATUS_WEBHOOK}"
          failure_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** failed."
  build-base-arm64:
    <<: *build-base
    executor: arm64-medium-executor
  test-and-build-dist-amd64:
    executor: amd64-medium-executor
    steps:
      - advanced-checkout/shallow-checkout
      - attach_workspace:
          at: .
      - run:
          command: ./ci/machine-init.sh dist-builder
          name: Update packages & docker, produce env file.
      - run:
          command: ./ci/docker-build-image.sh codex-dist-builder pull
          name: Build Builder Base Image
      - run:
          command: ./ci/docker-compose-exit.sh codex-lint
          name: Lint
      - run:
          command: ./ci/docker-compose-exit.sh codex-frontend-test
          name: "Frontend: Test"
      - store_test_results:
          path: test-results/jest
      - store_artifacts:
          path: frontend/coverage
      - run:
          command: ./ci/docker-compose-exit.sh codex-frontend-build
          name: "Frontend: Build"
      - run:
          command: ./ci/docker-compose-exit.sh codex-backend-test
          name: "Backend: Test"
      - store_test_results:
          path: test-results/pytest
      - store_artifacts:
          path: test-results/coverage
      - run:
          command: ./ci/docker-compose-exit.sh codex-build-dist
          name: Build Distribution
      - run:
          command: sudo chown -R circleci:circleci dist
          name: chown dist
      - persist_to_workspace:
          paths:
            - ./dist
          root: .
      - discord/status:
          fail_only: false
          webhook: "${DISCORD_STATUS_WEBHOOK}"
          failure_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** failed."
  build-amd64: &build
    executor: amd64-medium-executor
    steps:
      - advanced-checkout/shallow-checkout
      - attach_workspace:
          at: .
      - run:
          command: ./ci/machine-init.sh
          name: Update packages & docker, produce env file.
      - run:
          command: ./ci/docker-build-image.sh codex-arch
          name: Build Codex Runnable Image
      - discord/status:
          fail_only: true
          webhook: "${DISCORD_STATUS_WEBHOOK}"
          failure_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** failed."
          success_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** built."
  build-arm64:
    <<: *build
    executor: arm64-medium-executor
  deploy:
    executor: amd64-medium-executor
    steps:
      - advanced-checkout/shallow-checkout
      - attach_workspace:
          at: .
      - run:
          command: ./ci/machine-init.sh
          name: Update packages & docker, produce env file.
      - run:
          command: ./ci/docker-push.sh
          name: Push multi-arch images to Docker Hub
      - run:
          command: echo Disabled PyPI push # ./ci/python-publish.sh
          name: Publish Codex Package to PyPI
      - discord/status:
          fail_only: false
          webhook: "${DISCORD_STATUS_WEBHOOK}"
          failure_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** failed."
          success_message: "**${CIRCLE_USERNAME}**'s build: **${CIRCLE_JOB}** deployed."
version: 2.1
workflows:
  main:
    jobs:
      - build-base-amd64: &filters-all
          filters:
            branches:
              only:
                - main
      - build-base-arm64: &filters-release
          filters:
            branches:
              only:
                - main
      - test-and-build-dist-amd64:
          <<: *filters-all
          requires:
            - build-base-amd64
      - build-amd64:
          <<: *filters-release
          requires:
            - build-base-amd64
            - test-and-build-dist-amd64
      - build-arm64:
          <<: *filters-release
          requires:
            - build-base-arm64
            - test-and-build-dist-amd64
      - deploy:
          <<: *filters-release
          requires:
            - build-amd64
            - build-arm64

================================================
FILE: .dockerignore
================================================
__pycache__
!dist
!docker/debian.sources
.*cache
.circleci
.claude
.coverage*
.docker-token
.DS_Store
.env*
.eslintcache
.ghrc-token
.git
.mypy_cache
.picopt_timestamp
.picopt_treestamps.yaml
.pypi-token
.pytest_cache
.ropeproject
.ruff_cache
.venv*
*.egg-info
*.py[co]
*~
*Dockerfile
cache
codex/static
codex/static_build
comics
config
dev*
docker-compose*
docker*
MANIFEST
mock_comics
monkeytype.sqlite3
NEWS
node_modules
test-results
TODO.md
update-builder-requirement.sh
version.sh

================================================
FILE: .github/workflows/ci.yml
================================================
name: CI
on:
  push:
    branches: [main]
  pull_request:
    types: [opened, synchronize]
    branches: [main, develop]
env:
  IMAGE: ghcr.io/${{ github.repository }}
  BUILDER_IMAGE: ghcr.io/${{ github.repository }}/cache:ci
  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
jobs:
  # ---------------------------------------------------------------------------
  # Lint, test, and build the Python wheel (amd64 only)
  # ---------------------------------------------------------------------------
  test:
    name: Lint, Test & Build Dist
    runs-on: ubuntu-24.04
    if: |
      github.ref_name == 'main' && github.event_name == 'push' ||
      github.base_ref == 'main' ||
      (github.base_ref == 'develop' && github.head_ref == 'pre-release')
    permissions:
      contents: read
      packages: write
      checks: write
    steps:
      - name: Checkout
        uses: actions/checkout@v6
        with:
          # Only fetch full history for the main push gate
          fetch-depth: ${{ (github.ref_name == 'main' && github.event_name == 'push') && 0 || 1 }}
      - name: Check for Redundant Tests & Existing Dist
        id: gate
        if: github.ref_name == 'main' && github.event_name == 'push'
        env:
          GH_REPO: ${{ github.repository }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: bin/ci-download-dist-if-identical.sh
      - name: Set up Docker Buildx
        if: steps.gate.outputs.dist_found != 'true'
        uses: docker/setup-buildx-action@v4
      - name: Log in to GHCR
        if: steps.gate.outputs.dist_found != 'true'
        uses: docker/login-action@v4
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build dist-builder image
        id: build-ci
        if: steps.gate.outputs.dist_found != 'true'
        uses: docker/build-push-action@v7
        with:
          context: .
          target: codex-ci
          load: true
          tags: codex-ci:ci
          cache-from: type=registry,ref=${{ env.BUILDER_IMAGE }}
          cache-to: type=registry,ref=${{ env.BUILDER_IMAGE }},mode=max
      - name: Start Services
        id: start-services
        if: steps.build-ci.outcome == 'success'
        run: |
          mkdir -p dist test-results
          docker compose up -d ci
      - name: Lint
        id: lint
        if: steps.start-services.outcome == 'success'
        run: docker exec codex-ci make lint
      - name: Build for Tests & Dist
        id: build-for-tests
        if: steps.lint.outcome == 'success'
        run: docker exec codex-ci make build-choices build-frontend collectstatic
      - name: Tests
        id: tests
        if: steps.build-for-tests.outcome == 'success'
        run: docker exec codex-ci make test-frontend django-check test-python -o build-choices
      - name: Upload Test Results
        id: upload-tests
        if: "!cancelled() && steps.tests.outcome != 'skipped'"
        uses: actions/upload-artifact@v7
        with:
          name: test-results
          path: "**/test-results/pytest/*.xml"
      - name: Publish Test Report
        if: "!cancelled() && steps.upload-tests.outcome == 'success'"
        uses: mikepenz/action-junit-report@v6
        with:
          report_paths: "**/test-results/pytest/*.xml"
      - name: Build Distribution
        id: build-dist
        if: steps.tests.outcome == 'success'
        run: docker exec codex-ci make build-only
      - name: Stop Services
        if: steps.start-services.outcome == 'success'
        run: |
          touch .env.package # hack because down is too promiscuous
          docker compose down ci
      - name: Upload dist artifact
        if: "!cancelled() && steps.gate.outputs.dist_found == 'true' || steps.build-dist.outcome == 'success'"
        uses: actions/upload-artifact@v7
        with:
          name: python-dist
          path: dist/
          retention-days: 2
      - name: Discord notification (Test Results)
        if: "!cancelled()"
        uses: sarisia/actions-status-discord@v1
        with:
          webhook: ${{ secrets.DISCORD_WEBHOOK }}
          status: ${{ job.status }}
          title: Lint & Test
          description: "Tests for ${{ github.ref_name }} #${{ github.run_number }} ${{ job.status == 'success' && 'passed' || 'FAILED' }}"
          color: ${{ job.status == 'success' && '0x28a745' || '0xd73a49' }}
          url: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
  # ---------------------------------------------------------------------------
  # Build per-arch Docker images natively
  # ---------------------------------------------------------------------------
  build:
    name: Build Image (${{ matrix.arch }})
    needs: test
    if: github.event_name == 'push' || (github.base_ref == 'develop' && github.head_ref == 'pre-release')
    strategy:
      matrix:
        include:
          - arch: amd64
            runner: ubuntu-24.04
          - arch: arm64
            runner: ubuntu-24.04-arm
    runs-on: ${{ matrix.runner }}
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v4
      - name: Download dist artifact
        uses: actions/download-artifact@v8
        with:
          name: python-dist
          path: dist/
      - name: Set Build Context
        run: |
          VERSION=$(grep -Po '(?<=^version = ")[^"]+' pyproject.toml)
          echo "CODEX_VERSION=${VERSION}" >> "$GITHUB_ENV"
          echo "CODEX_WHEEL=$(find dist -name '*.whl' -print0 | xargs -0 basename)" >> "$GITHUB_ENV"
      - name: Log in to GHCR
        uses: docker/login-action@v4
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Build and push
        uses: docker/build-push-action@v7
        with:
          context: .
          platforms: linux/${{ matrix.arch }}
          push: true
          build-args: |
            CODEX_VERSION=${{ env.CODEX_VERSION }}
            CODEX_WHEEL=${{ env.CODEX_WHEEL }}
          tags: ${{ env.IMAGE }}:${{ env.CODEX_VERSION }}-${{ matrix.arch }}
          cache-from: type=gha,scope=build-${{ matrix.arch }}
          cache-to: type=gha,scope=build-${{ matrix.arch }},mode=max
  # ---------------------------------------------------------------------------
  # Create multi-arch manifest and publish
  # ---------------------------------------------------------------------------
  deploy:
    name: Deploy
    needs: build
    runs-on: ubuntu-24.04
    permissions:
      contents: read
      packages: write
    steps:
      - name: Checkout
        uses: actions/checkout@v6
      - name: Setup Docker Buildx
        uses: docker/setup-buildx-action@v4
      - name: Setup uv
        uses: astral-sh/setup-uv@v8.0.0
      - name: Download dist artifact
        uses: actions/download-artifact@v8
        with:
          name: python-dist
          path: dist/
      - name: Log in to GHCR
        uses: docker/login-action@v4
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create and push manifest
        run: |
          VERSION=$(uv version --short)
          TAG_ARGS=("-t" "${IMAGE}:${VERSION}")
          # Positive regex check for Final versions (e.g., 1.2.3)
          if [[ $VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
            TAG_ARGS+=("-t" "${IMAGE}:latest")
          fi
          docker buildx imagetools create \
            --annotation "index:org.opencontainers.image.title=Codex" \
            --annotation "index:org.opencontainers.image.description=Codex Comic Server" \
            --annotation "index:org.opencontainers.image.version=${VERSION}" \
            --annotation "index:org.opencontainers.image.authors=AJ Slater" \
            --annotation "index:org.opencontainers.image.url=https://codex-reader.app" \
            --annotation "index:org.opencontainers.image.source=https://github.com/ajslater/codex" \
            --annotation "index:org.opencontainers.image.licenses=GPL-3.0-only" \
            "${TAG_ARGS[@]}" \
            "${IMAGE}:${VERSION}-amd64" \
            "${IMAGE}:${VERSION}-arm64"
      - name: Publish to PyPI
        run: uv publish dist/*
        env:
          UV_PUBLISH_TOKEN: ${{ secrets.PYPI_TOKEN }}
      - name: Discord notification (Deploy Results)
        if: "!cancelled()"
        uses: sarisia/actions-status-discord@v1
        with:
          webhook: ${{ secrets.DISCORD_WEBHOOK }}
          status: ${{ job.status }}
          title: Deploy
          description: "Deploy ${{ github.ref_name }} #${{ github.run_number }} ${{ job.status == 'success' && 'succeeded' || 'FAILED' }}"
          color: ${{ job.status == 'success' && '0x28a745' || '0xd73a49' }}
          url: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
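The deploy job above only moves the `latest` tag for final `X.Y.Z` versions, so pre-releases are published under their own version tag but never become `latest`. A minimal Python sketch of the same gate; `docker_tags` and `FINAL_VERSION_RE` are hypothetical names mirroring the shell regex check, not code from this repo:

```python
import re

# Mirrors the deploy job's bash gate: [[ $VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]
FINAL_VERSION_RE = re.compile(r"^\d+\.\d+\.\d+$")


def docker_tags(image: str, version: str) -> list[str]:
    """Return the manifest tags the deploy job would create."""
    tags = [f"{image}:{version}"]
    if FINAL_VERSION_RE.match(version):  # only final releases update :latest
        tags.append(f"{image}:latest")
    return tags


# A final version gets both tags; a pre-release only gets its own.
assert docker_tags("ghcr.io/ajslater/codex", "1.2.3")[-1].endswith(":latest")
assert docker_tags("ghcr.io/ajslater/codex", "1.2.3b1") == [
    "ghcr.io/ajslater/codex:1.2.3b1"
]
```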
================================================
FILE: .gitignore
================================================
__pycache__/
__pypackages__/
__snapshots__
.*cache
.claude
.coverage
.coverage.*
.coverage*
.dmypy.json
.docker-token
.DS_Store
.eggs/
.env
.env-*
.env.package
.env.pushover
.eslintcache
.ghrc-token
.hypothesis/
.installed.cfg
.ipynb_checkpoints
.mypy_cache/
.nox/
.npm
.pypi-token
.pypirc
.pyre/
.pytest_cache/
.Python
.python-version
.ropeproject
.ruff_cache/
.scrapy
.spyderproject
.spyproject
.tox/
.uv-publish-env
.venv*
.webassets-cache
*.cover
*.egg
*.egg-info/
*.log
*.manifest
*.mo
*.pot
*.py,cover
*.py[cod]
*.sage.py
*.so
*.spec
*~
*$py.class
build
build/
celerybeat-schedule
celerybeat.pid
codex/static_build/
codex/static/
comics
config
coverage.xml
develop-eggs/
dist
dist/
dmypy.json
docs/_build/
docs/site
downloads/
eggs/
env.bak/
env/
ENV/
frontend/components.d.ts
frontend/coverage
frontend/src/choices/
htmlcov/
instance/
ipython_config.py
jspm_packages/
lib/
lib64/
local_settings.py
MANIFEST
monkeytype.sqlite3
node_modules
node_modules/
nosetests.xml
parts/
pip-delete-this-directory.txt
pip-log.txt
pip-wheel-metadata/
profile_default/
sdist/
share/python-wheels/
target/
test-results
TODO.md
var/
venv.bak/
venv/
wheels/

================================================
FILE: .picopt_treestamps.yaml
================================================
config:
  bigger: false
  convert_to: []
  formats:
    - GIF
    - JPEG
    - PNG
    - WEBP
  ignore: []
  keep_metadata: true
  recurse: true
  symlinks: true
.: 1657150080.593595

================================================
FILE: .prettierignore
================================================
__pycache__
.*cache
.*cache/
.circleci
.claude
.git
.mypy_cache
.pytest_cache
.ruff_cache
.venv
.venv*
*Dockerfile
cache
codex/_vendor
codex/static
codex/static_build
codex/static_src/img/*.svg
codex/templates/**/*.html
codex/templates/**/*.xml
codex/templates/pwa/manifest.webmanifest
comics
config
dist
frontend
node_modules
package-lock.json
test-results
tests/**/*.json
tests/**/*.xml
tests/**/*.yaml
tests/**/*.yml
uv.lock

================================================
FILE: .readthedocs.yaml
================================================
build:
  os: ubuntu-24.04
  tools:
    python: "3"
mkdocs:
  configuration: mkdocs.yml
python:
  install:
    - requirements: docs/requirements.txt
version: 2

================================================
FILE: .shellcheckrc
================================================
external-sources=true

================================================
FILE: CLAUDE.md
================================================
# CLAUDE.md

Codex

## Project Overview

Codex is a comic archive web server: Django 6 backend, Vue 3 frontend, SQLite database, WebSocket status updates. Users browse and read comics (CBZ, CBR, PDF) through a responsive web UI. A background librarian daemon watches the filesystem for changes and manages metadata import, cover generation, and search indexing.

## Commands

Commands are from @\~/.claude/python-devenv.md

### Local Commands

A non-exhaustive list of commands specific to this repository:

```bash
make install        # Install all dependencies (Python + Node)
make build-frontend # Vite production build
make collectstatic  # Django collectstatic
make build-only     # Build Python wheel (no frontend)
make build-choices  # Generate choices JSON from Django enums
```

## Architecture

### Backend (`/codex/`)

Django app served by Granian (ASGI). Key subsystems:

- **`models/`** — ORM models: Comic, Series, Publisher, Imprint, Library, Bookmark, Identifier. SQLite with WAL mode.
- **`views/`** — DRF ViewSets organized by feature: `browser/` (comic listings), `reader/` (page serving), `admin/` (CRUD), `opds/` (syndication).
- **`urls/`** — API at `/api/v3/`. Sub-routers: `/auth/`, `/c/` (reader), `/<group>/` (browser), `/admin/`.
- **`serializers/`** — DRF serializers for browser, reader, and admin responses.
- **`librarian/`** — Multiprocessing background daemon with dedicated threads:
  - `CoverThread` — Generate/cache comic covers
  - `ScribeThread` — Metadata import, FTS5 search index sync
  - `LibraryPollerThread` / `LibraryWatcherThread` — Filesystem monitoring (polling + inotify)
  - `CronThread` — Scheduled tasks (auto-import, cleanup)
  - `BookmarkThread` — Persist user reading positions
  - `NotifierThread` — Broadcast status via WebSockets
- **`websockets/`** — Django Channels consumers. Groups: `ALL` (everyone), `ADMIN` (staff). Broadcasts librarian task progress; see the consumer sketch after this list.
- **`settings/`** — Config loaded from `/config/codex.toml` (TOML). Env vars: `DEBUG`, `CODEX_CONFIG_DIR`, `TIMEZONE`; see the loading sketch after this list.
- **`applications/`** — ASGI app layers (HTTP + WebSocket routing, lifespan).
- **`run.py`** — Granian server entry point.
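To make the `settings/` bullet concrete: a minimal sketch of loading a TOML config with env-var overrides, assuming stdlib `tomllib` and the documented `CODEX_CONFIG_DIR`, `DEBUG`, and `TIMEZONE` variables. The `load_config` helper is hypothetical; the real logic lives in `codex/settings/`.

```python
import os
import tomllib
from pathlib import Path


def load_config() -> dict:
    """Hypothetical sketch: read codex.toml, then apply env overrides."""
    config_dir = Path(os.environ.get("CODEX_CONFIG_DIR", "/config"))
    toml_path = config_dir / "codex.toml"
    config: dict = {}
    if toml_path.is_file():
        with toml_path.open("rb") as toml_file:
            config = tomllib.load(toml_file)
    # The env vars documented above take precedence over file values.
    if debug := os.environ.get("DEBUG"):
        config["debug"] = debug.lower() not in ("0", "false")
    if timezone := os.environ.get("TIMEZONE"):
        config["timezone"] = timezone
    return config
```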
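Likewise for the `websockets/` bullet: a minimal Django Channels consumer sketch showing the `ALL`/`ADMIN` group split. The class name and handler wiring are illustrative assumptions, not the repo's actual `codex/websockets/consumers.py`.

```python
from channels.generic.websocket import AsyncJsonWebsocketConsumer


class NotifierConsumer(AsyncJsonWebsocketConsumer):
    """Illustrative consumer: everyone joins ALL, staff also join ADMIN."""

    async def connect(self):
        await self.accept()
        await self.channel_layer.group_add("ALL", self.channel_name)
        user = self.scope.get("user")
        if user is not None and user.is_staff:
            await self.channel_layer.group_add("ADMIN", self.channel_name)

    async def disconnect(self, code):
        await self.channel_layer.group_discard("ALL", self.channel_name)
        await self.channel_layer.group_discard("ADMIN", self.channel_name)

    async def notification(self, event):
        # Invoked by group_send(..., {"type": "notification", "message": ...})
        await self.send_json(event["message"])
```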
### Frontend (`/frontend/`)

Vue 3 + Vite + Vuetify 4 SPA.

- **`src/stores/`** — Pinia stores: `browser`, `reader`, `auth`, `metadata`, `socket`, `admin`.
- **`src/api/v3/`** — HTTP client (xior) with automatic CSRF token injection.
- **`src/components/`** — Organized by view: `browser/`, `reader/`, `admin/`, `metadata/`, `settings/`.
- **`src/plugins/`** — Vue Router, Vuetify, drag-scroll.
- **Routes:** `/` (home), `/:group/:pks/:page` (browser), `/c/:pk/:page` (reader), `/admin` (dashboard).
- **Build output:** Vite builds to `/codex/static_build/`, then `collectstatic` copies to `/codex/static/`.

### Docker (`/Dockerfile`)

Multi-stage build with targets:

1. **`runtime-base`** — Slim Debian with runtime libs only
2. **`builder`** — Python 3.14 + Node 24 + build tools
3. **`codex-ci`** — Builder + full source + dev deps (used in CI for lint/test/build)
4. **`wheel-installer`** — Installs compiled wheel, strips binaries
5. **`final`** (default) — Minimal production image. Exposes port 9810, volumes `/comics` and `/config`.

### CI (`.github/workflows/ci.yml`)

Single workflow with three jobs: `test` -> `build` -> `deploy`. The `test` job builds the `codex-ci` Docker target and runs lint/test/build inside it via `docker exec`. The `build` job creates per-arch production images (amd64 + arm64). The `deploy` job creates a multi-arch manifest and publishes to GHCR + PyPI.

### Makefile Structure

`/Makefile` includes fragments from `/cfg/*.mk`. These are managed by the sibling `cfg` boilerplate system. Key fragments: `codex.mk`, `django.mk`, `frontend.mk`, `python.mk`, `docker.mk`, `ci.mk`.

### Key Libraries

- **Backend:** Django 6, Channels 4.2, DRF 3.16, Granian 2.7, comicbox (comic parsing), Pillow, django-cachalot, loguru
- **Frontend:** Vue 3.5, Vite 8, Vuetify 4, Pinia 3, xior
- **Database:** SQLite + WAL + FTS5 full-text search (see the WAL sketch at the end of this file)

## Project-Specific Conventions

- Released as both a Python wheel (PyPI) and Docker image (GHCR).
- Config file is TOML (`codex.toml`), not env vars.
- Browser API groups comics by: publisher, series, folder, arc, volume — the `group` URL param selects which.
- Choices/enums are shared between frontend and backend via generated JSON (`make build-choices`); a generation sketch appears at the end of this file.
- The `compose.yaml` `ci` service mirrors the CI Docker build for local testing.

## Linting & Testing

Uses @\~/.claude/python-devenv.md

- **Python:** pytest with Django test runner. Results in `test-results/pytest/`.
- **Frontend:** vitest. Results in `test-results/`.
- **Lint:** Ruff (Python), ESLint + Prettier (JS/Vue), shellcheck, hadolint (Dockerfile).
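To illustrate the shared-choices convention above: `make build-choices` serializes Django choice enums to JSON that the Vite build imports. A hypothetical, minimal version of such a generator; the real one is `codex/choices/choices_to_json.py`, and the mapping below is invented for the example.

```python
import json
from pathlib import Path

# Invented example mapping; real choices come from codex/choices/.
READER_FIT_TO = {"S": "Screen", "W": "Width", "H": "Height", "O": "Original"}


def write_choices_json(out_dir: str = "frontend/src/choices") -> Path:
    """Dump one choices mapping where the frontend build can import it."""
    out_path = Path(out_dir)
    out_path.mkdir(parents=True, exist_ok=True)
    json_path = out_path / "reader-fit-to.json"
    json_path.write_text(json.dumps(READER_FIT_TO, indent=2) + "\n")
    return json_path
```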
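And for the SQLite + WAL note under Key Libraries: Django does not enable WAL on its own, so it is usually switched on with a PRAGMA when each connection opens. A sketch of the common signal-handler pattern; whether Codex wires it exactly this way (e.g., in `codex/startup/db.py`) is an assumption.

```python
from django.db.backends.signals import connection_created
from django.dispatch import receiver


@receiver(connection_created)
def enable_sqlite_wal(sender, connection, **kwargs):
    """Common pattern for WAL on SQLite; Codex's exact hook may differ."""
    if connection.vendor == "sqlite":
        with connection.cursor() as cursor:
            cursor.execute("PRAGMA journal_mode=WAL;")
            cursor.execute("PRAGMA synchronous=NORMAL;")
```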
================================================
FILE: Dockerfile
================================================
###############################################################################
# Multi-stage Dockerfile for Codex CI and production
#
# Targets:
#   codex-ci – CI image with all deps + source (lint, test, build wheel)
#   final    – Slim production image (default)
#
# Usage:
#   CI:   docker build --target codex-ci -t codex-ci:ci .
#         docker run codex-ci:ci make lint
#   Prod: docker build --build-arg CODEX_WHEEL=codex-X.Y.Z-py3-none-any.whl \
#                      --build-arg CODEX_VERSION=X.Y.Z .
###############################################################################

# ---- Stage 1: runtime-base (slim, no build tools) --------------------------
FROM ghcr.io/ajslater/python-debian:3.14.4-slim-trixie_0 AS runtime-base
COPY ci/debian.sources /etc/apt/sources.list.d/
# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        curl \
        libimagequant0 \
        libjpeg62-turbo \
        libopenjp2-7 \
        libssl3 \
        libyaml-0-2 \
        libtiff6 \
        libwebp7 \
        ruamel.yaml.clib \
        unrar \
        zlib1g \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# ---- Stage 2: builder (build tools + Node for compilation) -----------------
FROM nikolaik/python-nodejs:python3.14-nodejs24 AS builder
# nodejs25 blocked on bug https://github.com/nodejs/node/issues/60303
COPY ci/debian.sources /etc/apt/sources.list.d/
# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        bash \
        build-essential \
        cmake \
        git \
        libimagequant0 \
        libjpeg62-turbo \
        libopenjp2-7 \
        libssl3 \
        libyaml-0-2 \
        libtiff6 \
        libwebp7 \
        python3-dev \
        ruamel.yaml.clib \
        unrar \
        zlib1g \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
WORKDIR /app
# hadolint ignore=DL3013,DL3042
RUN pip3 install --no-cache --upgrade pip

# ---- Stage 3: codex-ci (all deps + source for CI) -------------------------
FROM oven/bun:latest AS bun-source

FROM builder AS codex-ci
# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        shellcheck \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*
COPY --from=bun-source /usr/local/bin/bun /usr/local/bin/bun
COPY --from=bun-source /usr/local/bin/bunx /usr/local/bin/bunx
WORKDIR /app

# Python deps (cacheable when lockfiles unchanged)
COPY pyproject.toml uv.lock ./
# hadolint ignore=DL3042
RUN PIP_CACHE_DIR=$(pip3 cache dir) PYMUPDF_SETUP_PY_LIMITED_API=0 \
    uv sync --no-install-project --no-dev --group lint --group test

# Root Node deps (eslint, prettier, etc.)
COPY package.json bun.lock ./
RUN bun install

# Frontend Node deps
WORKDIR /app/frontend
COPY frontend/package.json frontend/bun.lock ./
RUN bun install

# Full source
WORKDIR /app
COPY . .
VOLUME /app/codex/static_build
VOLUME /app/codex/static
VOLUME /app/dist
VOLUME /app/test-results
VOLUME /app/frontend/src/choices

# ---- Stage 4: wheel-installer (compile native extensions) ------------------
FROM builder AS wheel-installer
ARG CODEX_WHEEL
COPY dist/${CODEX_WHEEL} /tmp/${CODEX_WHEEL}
# hadolint ignore=DL3059,DL3013
RUN PYMUPDF_SETUP_PY_LIMITED_API=0 pip3 install --no-cache-dir /tmp/${CODEX_WHEEL}

# Slim down /usr/local before it gets copied to the final image
# hadolint ignore=DL3059
RUN set -eux \
    # Remove pip, setuptools, wheel — not needed at runtime
    && pip3 uninstall -y pip setuptools wheel 2>/dev/null || true \
    && rm -rf /usr/local/bin/pip* \
    # Strip debug symbols from shared libraries (~30-50% size reduction on .so files)
    && find /usr/local -name '*.so' -exec strip --strip-unneeded {} + 2>/dev/null || true \
    && find /usr/local -name '*.so.*' -exec strip --strip-unneeded {} + 2>/dev/null || true \
    # Remove Python bytecode caches (regenerated on first import)
    && find /usr/local -type d -name '__pycache__' -exec rm -rf {} + 2>/dev/null || true \
    && find /usr/local -name '*.pyc' -delete 2>/dev/null || true \
    # Remove the stdlib test suite (~30MB) — safe, never needed at runtime
    && rm -rf /usr/local/lib/python*/test \
    && rm -rf /usr/local/lib/python*/idlelib \
    && rm -rf /usr/local/lib/python*/ensurepip \
    # Remove type stubs — only used by type checkers
    && find /usr/local -name '*.pyi' -delete 2>/dev/null || true \
    # Remove the installed wheel
    && rm -f /tmp/${CODEX_WHEEL}

# ---- Stage 5: final (production image) ------------------------------------
FROM runtime-base AS final
ARG CODEX_VERSION
LABEL org.opencontainers.image.title="Codex" \
    org.opencontainers.image.description="Codex Comic Server" \
    org.opencontainers.image.version="${CODEX_VERSION}" \
    org.opencontainers.image.authors="AJ Slater" \
    org.opencontainers.image.url="https://codex-reader.app" \
    org.opencontainers.image.source="https://github.com/ajslater/codex" \
    org.opencontainers.image.licenses="GPL-3.0-only"
RUN mkdir -p /comics && touch /comics/DOCKER_UNMOUNTED_VOLUME
RUN mkdir -p /home/abc/.config/comicbox \
    && chown -R abc /home/abc/.config \
    && chmod 777 /home/abc/.config /home/abc/.config/comicbox
COPY --from=wheel-installer /usr/local /usr/local
VOLUME /comics
VOLUME /config
EXPOSE 9810
CMD ["/usr/local/bin/codex"]

================================================
FILE: LICENSE
================================================
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007

Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.

Preamble

The GNU General Public License is a free, copyleft license for software and other kinds of works.

The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.

When we speak of free software, we are referring to freedom, not price.
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. The precise terms and conditions for copying, distribution and modification follow. TERMS AND CONDITIONS 0. Definitions. "This License" refers to version 3 of the GNU General Public License. "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. A "covered work" means either the unmodified Program or a work based on the Program. To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. 
Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. 1. Source Code. The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. A "Standard Interface" means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. The "System Libraries" of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A "Major Component", in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. The "Corresponding Source" for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. The Corresponding Source for a work in source code form is that same work. 2. Basic Permissions. All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. 
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. 3. Protecting Users' Legal Rights From Anti-Circumvention Law. No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. 4. Conveying Verbatim Copies. You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. 5. Conveying Modified Source Versions. You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: a) The work must carry prominent notices stating that you modified it, and giving a relevant date. b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to "keep intact all notices". c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an "aggregate" if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.

6. Conveying Non-Source Forms.

You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:

a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.

b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.

c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.

d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.

e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.

A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.

A "User Product" is either (1) a "consumer product", which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage.
For a particular product received by a particular user, "normally used" refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.

"Installation Information" for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.

If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).

The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.

Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.

7. Additional Terms.

"Additional permissions" are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.

When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:

a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or

b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or

c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or

d) Limiting the use for publicity purposes of names of licensors or authors of the material; or

e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or

f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.

All other non-permissive additional terms are considered "further restrictions" within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.

If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.

Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.

8. Termination.

You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).

However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.

Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.

Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.

You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.

10. Automatic Licensing of Downstream Recipients.

Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.

An "entity transaction" is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.

You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.

11. Patents.

A "contributor" is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's "contributor version".

A contributor's "essential patent claims" are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, "control" includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.

Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.

In the following three paragraphs, a "patent license" is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To "grant" such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. "Knowingly relying" means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.

If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.

A patent license is "discriminatory" if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.

Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.

12. No Surrender of Others' Freedom.

If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.

13. Use with the GNU Affero General Public License.

Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work.
The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.

14. Revised Versions of this License.

The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.

If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.

Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.

15. Disclaimer of Warranty.

THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

16. Limitation of Liability.

IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.

17. Interpretation of Sections 15 and 16.

If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.

END OF TERMS AND CONDITIONS

How to Apply These Terms to Your New Programs

If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms.

To do so, attach the following notices to the program.
It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) <year>  <name of author>

    This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.

Also add information on how to contact you by electronic and paper mail.

If the program does terminal interaction, make it output a short notice like this when it starts in an interactive mode:

    <program>  Copyright (C) <year>  <name of author>
    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, your program's commands might be different; for a GUI interface, you would use an "about box".

You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see <https://www.gnu.org/licenses/>.

The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read <https://www.gnu.org/philosophy/why-not-lgpl.html>.

================================================
FILE: Makefile
================================================
SHELL := /usr/bin/env bash

include cfg/codex.mk
include cfg/django.mk
include cfg/frontend.mk
include cfg/python.mk
include cfg/docker.mk
include cfg/ci.mk
include cfg/docs.mk
include cfg/node.mk
include cfg/node_root.mk
include cfg/common.mk
include cfg/help.mk

.PHONY: all

================================================
FILE: NEWS.md
================================================
# 📜 Codex News

## v1.10.12

- Fixes
  - Fix an occasional bug linking folders when importing.
  - Fix updating comics crash by batching comic updates.
  - Fix possible import crashes in delete & linking.
  - Fix overzealous lazy importer importing tags from books that already had tags.
  - Fix reader settings clearing anomalies. Add a global reader settings clear button.
  - Fix unnecessary redirect in OPDS when viewing comic in extended metadata.
- Features
  - Codex identifies itself in the HTTP Server: header.
  - Codex identifies itself in the OPDS v1 tag.
  - Adjusted importer batching variable defaults for greater throughput.

## v1.10.11

- Fixes
  - Custom covers were not importing.
  - Custom group covers were not counted in admin library view.

## v1.10.10

- Fixes
  - Fix creating non-extant reader global settings.

## v1.10.9

- Fixes
  - Fix browser settings not clearing with clear button.
  - Fix search settings not clearing on OPDS start page.

## v1.10.8

- Fixes
  - Reset settings on OPDS start page more consistently.
  - More logging for webserver errors.
  - There's a jump in version numbers because the production build process was broken.

## v1.10.4-7

- Broken releases

## v1.10.3

- Fixes
  - Poll all libraries button was not polling all libraries.
  - Fix OPDS v2 manifest series link.
  - Force OPDS v2 start link to reset filters and order.
  - Library server folder picker shows folder menu more consistently.
- Features
  - Select many mode in browser for browser actions.

## v1.10.2

- Fixes
  - Fix many OPDS v2 links leading to the wrong views.
  - Fix admin update user validation.

## v1.10.1

- Mistaken release

## v1.10.0 Rust modules & Settings Features

- 🚨 Big Changes 🚨
  - The docker repo has changed to ghcr.io/ajslater/codex (see the pull sketch after this entry).
  - Django 6 requires python >=3.12.
  - Unified configuration:
    - Your hypercorn.toml config will be migrated automatically to codex.toml. Options previously only configurable with environment variables may now also be specified in codex.toml.
    - Some of the environment variable names have changed, but old values are still respected for compatibility.
  - Replaced the thumbnail filename algorithm. Thumbnails will regenerate.
- Fixes
  - Fix old dynamic covers displaying when the database had changed due to imports.
  - Fix erroneous "Bookmark" filter appearing in filter menu when a read state is selected.
  - Fix reader close button receiving nonsensical route.
  - Fix job status messages sometimes not being updated correctly.
  - Fix the check-latest-version-of-Codex bug.
  - Tags page sometimes had erroneous links that went nowhere.
  - Fixed some foreign key integrity fixing bugs.
- Features
  - Browser can save and load named views.
  - Reader Settings are now available for Series, Parent Folder and Story Arc.
  - The Admin Jobs tab is an enhanced tasks tab with start and stop buttons and job progress.
  - Browser Pane refreshes with a click/touch pull control.
- Performance Improvements
  - Codex now uses the Granian http server instead of Hypercorn.
  - The file watcher and poller now use one thread each no matter how many libraries you have.
  - Shaved about 80MB off the compressed docker image and 15MB off the python wheel.
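
A minimal sketch of pulling the image from the new registry location named above; the `:latest` tag is an assumption, pin a release tag if you prefer:

```sh
# Pull Codex from its new GitHub Container Registry home.
# The :latest tag is an assumed example; any published version tag works.
docker pull ghcr.io/ajslater/codex:latest
```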

## v1.9.24

- Fixes
  - Fix OPDS v2 numberOfItems pagination sum.

## v1.9.23

- Fixes
  - Fix admin tasks tab crash.
  - Fix Library folder picker showing bad data.

## v1.9.22

- Fixes
  - Fix many batch jobs not running, including polling and importing.

## v1.9.21

- Fixes
  - Fix OPDS v2 manifest crash.

## v1.9.20

- Features
  - Browser filters now show selected filters more clearly in the menus.
  - Fix filtering on credits when clicking on metadata chips.
  - OPDS v2 credits and subjects now link to views filtered on the tag.

## v1.9.19

- Fixes
  - Fix PDF date parsing bug on import.
  - Fix comic names with null volume_to field.
  - Provide volume name in comic title for OPDS v2 as there's no volume view available.
  - Fix OPDS v2 folder as collection naming.
- Dev Notes
  - Vuetify 4.0 frontend.

## v1.9.18

- Fixes
  - Fix search indexing the universes tag.

## v1.9.17

Broken build.

## v1.9.16

- Fixes:
  - Fix broken Reader close button.
  - Fix OPDS v2 progression crash.

## v1.9.15

- Fixes:
  - Fix OPDS v2 progression crash.
  - Adjust OPDS v2 progression position field to be 1 based, while the Codex page is 0 based.

## v1.9.14

- Fixes:
  - Fix bad href validation in OPDS v2 leading to null links and therefore invalid OPDS.

## v1.9.13

- Change:
  - Codex only accepts auth tokens prefixed with the Bearer header (see the request sketch after this entry).
- Fixes:
  - Fix crash on API schema generation.
  - Fix scribe daemon not shutting down properly.
  - Fix Swagger API docs crash.
  - Fix full text search barfing on single quoted terms.
  - Return 401 and authorization v1 json to OPDS instead of 403 on auth failure more often.
- Features:
  - OPDS order by filename.
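
A minimal sketch of a conforming request after this change; `API_TOKEN` and `CODEX_OPDS_URL` are placeholders for your own token and your server's OPDS address, not documented values:

```sh
# The Bearer scheme is now required in the Authorization header.
# API_TOKEN and CODEX_OPDS_URL are placeholder environment variables.
curl --header "Authorization: Bearer ${API_TOKEN}" "${CODEX_OPDS_URL}"
```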

## v1.9.12

- Fixes:
  - Fix accepting the correct Content-Type for OPDS v2 progression.
  - Fix parsing OPDS v2 progression position.
  - Fix bookmark update crash.

## v1.9.11

- Fixes:
  - Fix OPDS v1 pagination.

## v1.9.10

- Fixes
  - Fix Remote-User authorization for proxy setting http headers.
  - Fix order links not appearing in OPDS v1.
- Feature:
  - Allow "Bearer" header for token authentication in addition to "Token".

## v1.9.9

- Fixes
  - Access control bug would let users see comics in include groups they were not a part of.
  - Remove redundant top links from OPDS v2 facets group block.
  - Fixed a status crash.
- Features
  - Remote-User HTTP header SSO support activated by an environment variable. See docs.
  - HTTP Header Auth Tokens are now accepted as authorization. Useful for OPDS and possibly some SSO setups.

## v1.9.8

- Bad release. Does not start.

## v1.9.7

- Fixes
  - Fix OPDS v2 manifest/reading crash.
  - Fix OPDS v2 search.

## v1.9.6

- Fixes
  - Fix OPDS feed crash.

## v1.9.5

- Fixes
  - Fix settings not resetting to default on OPDS start pages.
  - Isolate OPDS settings from web settings so they don't reset each other.

## v1.9.4

- Fixes
  - Fix unnecessary redirects in OPDS v1 & v2 search.

## v1.9.3

- Fixes
  - Unnecessary redirect going to OPDS v1 start page.
  - Fixed start page link in OPDS v2.0.

## v1.9.2

- Fixes
  - OPDS 2.0 homepage preview sections were showing the wrong queries.
  - OPDS 2.0 progression bookmarking was broken. Stump doesn't seem to be transmitting progression data yet, though.
- Features
  - OPDS 1.0 gains a homepage similar to OPDS v2.
  - OPDS start page URLs are now different. See the OPDS popup dialog.

## v1.9.1

- Fixes
  - OPDS v2 fix crash on faceted views.

## v1.9.0 - OPDS 2.0 Redesign

- Fixes
  - Fix crash in OPDS v2 feed.
- Features
  - Redesigned OPDS v2 to work with Stump & Readest clients.
  - Docker healthcheck endpoint. See docs.

## v1.8.22

- Fixes
  - Fix reading books crash that crept into the last release.

## v1.8.21

- Fixes
  - Fix Close Book button linking to parent of desired route.

## v1.8.20

- Fixes
  - Fix dragging in the reader when zoomed in and using the left mouse button.
  - Fix weird spacing and borders on browser and reader toolbars.
  - Attempt to make Close Book route in reader more accurate.

## v1.8.19

- Fixes
  - Do not sanitize Library and Comic path names. Fixes an import crash on paths with dangerous looking strings in them.
  - Fix a crash when moving comics.

## v1.8.18

- Fixes
  - Filter and Search parameters were being applied after clearing.
- Features
  - Read metadata from PDF embedded files.

## v1.8.17

- Features
  - Python 3.14 support.
- Fixes
  - Fix for codex sometimes shutting down when asked to restart.

## v1.8.16

- Don't support Python 3.14 until dependencies do.
- Docker image is now based on Debian trixie.

## v1.8.15

- Performance
  - Replace axios with the xior AJAX library.

## v1.8.14

- Fixes
  - Fix settings not saving in reader and sometimes in browser.
  - Fix shutdown and restart signals on Windows.

## v1.8.13

- Fixes
  - Fix import crash when metadata import is disabled.

## v1.8.12

- Fixes
  - Fix publishers and series tags disappearing. Use Library Force Update to restore them.
  - Fix credit role order in tags screen.

## v1.8.11

- Fixes
  - Restarting Codex with Codex would still crash. More comprehensive fix.

## v1.8.10

- Fixes
  - Restarting Codex with Codex would crash, so Auto-Update restarts were broken.
  - Sorry non-docker homies! Thanks @professionaltart.
  - Fix universes not being search indexed (very rare).
  - More reliable re-render for browser page slider and navigation buttons.

## v1.8.9

- Fixes
  - Attempt to fix not saving new stat values if update doesn't update comic metadata.
  - Attempt to fix import bug where update metadata is disposed of before it can be applied.
  - Attempt to fix browser page slider not updating to match the current folder.

## v1.8.8

- Fixes
  - Fix import crash on some Metron tagged comics with a primary identifier source.

## v1.8.7

- Fixes
  - Make Full Text Search table smaller. Some irrelevant fields and data were causing exponential bloat, crippling large libraries.
  - Fix search by field for string fields, identifiers and issue.
  - Fix overflow on browser card titles.

## v1.8.6

- Fixes
  - Scale search index sync batch size with memory. Large search sync query was preventing search sync of large databases on small machines.

## v1.8.5

- Fixes
  - Better fix for persistent settings. Old fix could lead to settings that never clear.

## v1.8.4

- Fixes
  - Fix not importing page_count when import metadata is off, which leads to unreadable books.
  - Fix persistent settings.

## v1.8.3

- Fixes
  - Fix searching by string-like fields in the search bar, like 'path'.
  - Fix minor search update exception when there's nothing search related to update on import.

## v1.8.2

- Fixes
  - Force docker logs (all stdout logs) to colorize output.

## v1.8.1

- Fixes
  - Fix minor search update exception.

## v1.8.0

- Features
  - Support [MetronInfo.xml v1.0 Schema](https://metron-project.github.io/docs/category/metroninfo) for comic metadata.
  - Support CB7 (7zip) comic archives.
  - Search Indexing is now also done in the import phase, which is faster than syncing from the db:
    - 20k comics both imported and indexed in 9 minutes on my old macbook at a rate of 30 comics per second. Half a million comics might take less than 5 hours on the same machine.
    - However, this update replaces the search index and Codex will sync the search index when it starts.
  - Fewer database updates when comics change metadata or don't change metadata at all mean fewer and faster update imports.
  - New Tags:
    - Add Universes. Main Characters and Main Team marked with a ★.
    - Metron tags can provide links to web databases for each tag.
  - New Import Metadata on Demand Admin Flag.
  - Add manual Import Abort task for admins.
  - Browser tab titles show route and page title, no longer show custom banner.
  - Logging format changed.
- Fixes
  - Reading Order in reader was not always defaulting to your last Browser Top Group.
  - Fixed filtering on Critical Rating and FileType.
  - Fixed searching on Critical Rating field.
  - Bug displaying Age Rating, Original Format, Scan Info, Tagger in metadata.
  - Fixed database locking crashes.
  - Attempt to fix major version change CSRF errors by forcing logout if CSRF errors occur.
- Dev
  - Uses Comicbox 2.0, see [Comicbox NEWS for all details](https://github.com/ajslater/comicbox/blob/main/NEWS.md).

## v1.7.15

- Fixes
  - Fix unable to change top group to folders or story arcs if on root browse page.

## v1.7.14

- Fixes
  - Fix Reader jumping back into the same book when changing to a new book.

## v1.7.13

- Fixes
  - Fix Reader Reading Order dropdown unable to change reading order.
  - Fix loading root browser pages greater than 1 as a new route.
  - Missing books error screen had no toolbars, could not close it.
  - Fix docker configuration for Synology by creating a comicbox config directory.
  - Wait to load browser and reader pages until login is tried with a cookie.
- Features
  - Experimental `CODEX_BROWSER_MAX_OBJ_PER_PAGE` env variable. Default is 100 (see the sketch after this entry).
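
A sketch of overriding the default page size, assuming the standard `codex` entry point; `250` is an arbitrary example value (for Docker, the equivalent would be an `-e` flag or an `environment:` entry in compose):

```sh
# Raise the maximum objects per browser page from the default of 100.
CODEX_BROWSER_MAX_OBJ_PER_PAGE=250 codex
```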

## v1.7.12

- Fixes
  - Crash downloading comics when user is in a group.
  - User last active activity was often not recorded.
- Features
  - Animate Pages option toggle in reader settings.

## v1.7.11

- Fixes
  - Fix importing of story arc tags.

## v1.7.10

- Fixes
  - Fix occasional import link tags crash.

## v1.7.9

- Fixes
  - Tags page layout fixes.

## v1.7.8

- Features
  - Tags page layout changes with more notes from @beville.
- Fixes
  - Tags for simple comic fields were incorrectly taken from the first comic in a group.
  - Filter menu wasn't populating for top folder.
  - Librarian status tasks would appear out of order.

## v1.7.7

- Fixes
  - Fix Librarian Statuses not loading in UI.
  - Tags screen crash with untagged series or volumes.
  - Fix tags screen Mark Read confirmation names.
  - Order Tags Contributors by relevance, not alphabetically.

## v1.7.6

- Features
  - Always Show Filenames setting for browser cards.
  - Tags page layout changes with notes from @beville.
- Fixes
  - Fix import crash on files with escapable html characters or utf-8 chars in the filename.
  - Fix page range crash in reader.
  - Fix overzealous escaping of html in metadata strings.
  - Fix group tags not aggregated properly in metadata screen for folders and story arcs.

## v1.7.5

- Fixes
  - More granular websocket notifications mean the web UI does fewer, more targeted data updates.
  - Prevent deactivating and deprivileging the logged in user.
  - Fix resetting active flag when resetting admin user with environment variable.
  - Fix crash in auto-update when unable to determine current Codex version.

## v1.7.4

- Fixes
  - Fix setting bookmarks and book settings bug.
  - Sanitize HTML out of imported comic metadata fields and admin inputs.

## v1.7.3

- Features
  - Browser Order By Child Count.
  - Customized site title option in Admin Flags.
  - Codex logs are now compressed when rotated.
  - Support OPDS v2.0 Progression (streaming) draft proposal from Aldiko.
- Fixes
  - Browser Group Mark Read/Unread obeys browser filters.
  - Fixed bad redirect when deep linking into browser Folders or Story Arcs.
  - Fixed order by name for issues disregarding volume names in browser and reader.
  - Fix display of name and filename on browser cards.
  - Fix batching import of Contributors to prevent crash on large imports.
  - Fixed an OPDS Metadata crash.

## v1.7.2

- Features
  - Download entire groups of comics from the browser and metadata screens.
- Fixes
  - Fix reader page API sometimes crashing.

## v1.7.1

- Features
  - Always display filename for comics on browser cards in Folder View.
  - Detect .jxl extension (JPEG XL) as a comic page.
- Fixes
  - Fix ignoring MacOS resource forks in archives.
  - Don't redirect to issue view on first search in Folder View.

## v1.7.0

- Features
  - Search
    - Use SQLite Full Text Search v5 for search engine.
    - Search syntax has changed. See the help popup at the end of the search bar.
    - Use faster db column lookups for some search bar queries. Thanks @bmfrosty.
    - You may remove the directory `config/whoosh_index`.
  - Integrity Checks
    - Faster, more comprehensive db integrity checks run every night instead of at startup.
    - Integrity checks can run on startup with environment variables documented in README.
- Fixes
  - Actually fix browser opening reader at correct bookmark.
    - Also fixes progress calculation on browser cards.
  - Fixed crashes when the upstream codex version is not accessible.
  - Fixed possible race conditions with nightly maintenance.

## v1.6.19

- Fixes
  - Fix browser opening reader at correct bookmark.
  - Fix for browser triple tap bug for android tablet browsers in desktop mode.
  - Fix populating arcs in reading order menu in reader.
  - Fix submitting old arc to reader API.
  - Fix Version API blocking. Add check version admin task.
  - Fix Library "Poll Every" validation.
  - Fix Metadata dialog not scrolling sometimes.
  - Fix file extension for downloaded PDF pages.

## v1.6.18

- Yanked. Broken Reader.

## v1.6.17

- Features
  - Admin Action buttons now responsive to view size.
- Fixes
  - Auto update wasn't comparing versions well.
  - Possible fix for initializing admin flags crash.

## v1.6.16

- Fixes
  - Import may have been marking mounted drive's comics modified inappropriately.
  - Import crash when moving comics.
  - Relink deep orphan folders in the db instead of recreating them.
  - Do not adopt orphan folders deleted from the filesystem.
  - Admin Tab change password for user was broken.
  - More robust ui cache busting on library update.
  - Fix minor error on metadata text boxes with null values.

## v1.6.15

- Fixes
  - Fix more Metadata links to browser groups not computing and resolving properly.

## v1.6.14

- Fixes
  - Fix Metadata links to browser groups not resetting topGroup properly.

## v1.6.13

- Fixes
  - Admin Panel Link was showing in the admin panel, not in the browser or reader.

## v1.6.12

- Features
  - Native Windows installation instructions in the README thanks to @professionaltart.
  - Anonymously send stats to improve Codex. See admin/flags for description and opt-out.
- Fixes
  - Detect iOS devices in Desktop Mode for proper iOS tap behavior.

## v1.6.9, v1.6.10, v1.6.11

- Yanked. Bad network behavior. Broken javascript.

## v1.6.8

- Fixes
  - Fix OPDS streaming in lazy metadata mode for Chunky-like readers which require a page count.

## v1.6.7

- Fixes
  - OPDS authorization for some readers.
  - Remove superfluous debug exception trace on timezone endpoint.

## v1.6.6

- Fixes
  - User creation in admin panel broke.
  - There was confusing UI on the admin panel unauthorized screen.

## v1.6.5

- Fixes:
  - Fix logout button not working.

## v1.6.4

- Fixes:
  - Reader crash loading reader order arcs.
  - OPDS datetimes now uniformly served in iso format.
  - Fix browser filter menus clearing and loading irregularities.
  - Fix parsing negative issue numbers in filenames.
  - Log common non-ComicBookInfo archive comments with less alarm.
- Removed
  - LOGLEVEL=VERBOSE deprecated for a long time. Use LOGLEVEL=DEBUG.

## v1.6.3

- Features
  - Reader inherits the last browser view, with filters, as its default reading arc.
  - When browser page is less than 1, redirect to parent. When 1 and empty, show empty page.
- Fix
  - The cover api was not accepting http basic (opds) authentication.

## v1.6.2

- Fixes
  - Fix pagination with more than 100 comics in the browser.

## v1.6.1

- Features
  - Add a retry button on book load error and page load error pages.
- Fixes
  - Fix unable to login if anonymous users prohibited.
  - Fix filter crash.
  - Metadata was showing incorrect groups for individual comics.

## v1.6.0

- Features
  - Custom Covers for Folders, Publishers, Imprints, Series and Story Arcs.
  - Browser setting to choose Dynamic or First group covers. Thanks @Thakk.
  - Breadcrumbs in the browser.
  - Reader can read by Volumes as well as by Series, Folder and StoryArc.
  - More compact UI controls.
  - Metadata tags can be clicked to browse filtered on that tag.
  - Experimental API throttling support. Search the README for "throttle".
  - Add websocket updates for anonymous sessions.
  - Speed and caching optimizations.
- Fixes
  - OPDS http basic authorization fixed.
  - Groups with the same name in different cases collapse into one group in the browser.
  - Order By respects browser show groups settings.
  - Fixed re-import of urls and identifiers.
  - Fixed cleanup of some foreign keys when no longer used.
  - Clean up all orphan folders on startup instead of on first pass.
  - Fix creating bookmarks.
  - Update browser sessions for users when they finish a book.

## v1.5.19

- Fixes
  - Metadata crash on folders.

## v1.5.18

- Fixes
  - Ignore comic pages from dotfiles and macOS resource forks.

## v1.5.17

- Fixes
  - Fix background color of browser card controls since vuetify update.

## v1.5.16

- Fixes
  - Fix creating and updating exclude groups.
  - More Web & url tags parsed from metadata.

## v1.5.15

- Fixes
  - OPDS streaming broken for some clients (Chunky) without metadata.
  - OPDS redirects for empty pages or 404's were crashing.
  - OPDS uses filename fallback for title if missing metadata.

## v1.5.14

- Features
  - Relative folder path is now searchable if Folder View enabled.
  - More granular caching, hopefully for better performance.
- Fixes
  - OPDS redirects were crashing.
  - Null search was crashing metadata for single comics.
  - Fix a breakage with fast static file serving.
  - Change browser order by to something sensible when search cleared.

## v1.5.13

- Fixes
  - Fix root folder for library sometimes not created on import.
  - Fix redirect loop in browser when all members of a group deleted.
  - Fix browser pagination buttons not advancing.
  - Fix OPDS v2 crash.
  - Fix browser throbber not appearing when making query.

## v1.5.12

- Fixes
  - Fix Folder browser offset pagination bug for folder with books and no folders.

## v1.5.11

- Fixes
  - Fix erroneous Folder View page out of bounds redirect.

## v1.5.10

- Fixes
  - Folder view was not showing all the books on mixed folder & book pages.
  - Shutdown and Restart admin tasks were not working.

## v1.5.9

- Fixes
  - Crash when reading comics in folder view introduced in v1.5.8.

## v1.5.8

- Fixes
  - Searches with no results were returning every comic instead of no comics.
  - issue: field searching returned no results.
  - issue_number, community_rating, & critical rating no longer require two digits of precision.
  - Excess books included in reader arc/folder/series.
- Features
  - Even Lazier import when Import Metadata Admin flag turned off.
  - issue: field search now combines numeric and suffix parts.

## v1.5.7

- Fixes
  - Pagination crash with more than 100 folders.
  - Experimental fix for Synology Docker CHOWN_PYTHON_SITE_PACKAGES=1.

## v1.5.6

- Fixes
  - Fix sqlite limit crash when importing > \~1000 web urls.

## v1.5.5

- Fixes
  - Attempt to fix import crash processing too much metadata at once. Allow undocumented env variable to manipulate this: CODEX_FILTER_BATCH_SIZE (default: 900).
  - Fix search engine update crash for large collections.

## v1.5.4

- Fixes
  - Django 5 broke root_path prefixing from the asgi server. Work around it.

## v1.5.3

- Fixes
  - Mouse horizontal scroll broken on Firefox.

## v1.5.2

- Fixes
  - OPDS titles were showing as "Unknown" for comics with tagged volumes.
  - OPDS v2 was crashing.
  - Cover displayed for group browser with Name ordering was inconsistent.
  - Enable mouse drag horizontal scrolling in reader zoom mode.

## v1.5.1

- Fixes
  - OPDS v1 was not rendering any data.

## v1.5.0

- **Warning**
  - The main database path has changed from `db.sqlite3` to `codex.sqlite3`.
  - This version forces a rebuild of the search index (not the main database).
- Fixes
  - Some integrity checks weren't running on startup.
  - The metadata page would sometimes crash for Admins.
  - Moving a comic to a subfolder would crash.
  - Moving deep subfolders would crash.
  - Moving a comic to the root folder would send the comic to the phantom zone.
  - Updating comics would sometimes not delete removed tags.
  - Series & Volumes no longer updated too often on import.
  - Admin Actions was polling all libraries when one was selected.
  - OPDS was showing repeated titles.
  - Vertical scroller tracking and updating improved.
  - Page filenames are now sorted case insensitively, which should improve order.
- Features
  - Admin Exclude groups complement the existing Include groups.
  - New metadata tags: Monochrome, Tagger, GTIN, Review, Identifiers, & Reading Direction. Available when comics are re-imported (Force Update recommended).
  - Identifiers metadata tag replaces the "Web" tag.
  - Reads ComicInfo.xml and other formats from the PDF keywords field. You can write ComicInfo.xml to PDFs with comictagger.
  - Reading Direction reader setting replaces Reader's vertical & horizontal views.
  - Supports the MetronInfo metadata format (rare).
  - Filesystem events filtered to only the ones Codex handles.
  - Double-click to zoom on pages in reader.
  - Link to read a PDF with the browser in a new tab.
  - Experimental checkbox for caching entire comic or PDF in the browser.
  - Admin Flag for disabling most metadata import.
- Dev
  - Using comicbox v1 for metadata import.

## v1.4.3

- Fixes
  - Crash on undecodable characters in metadata.
  - Search terms weren't applying to filter choices population.
  - Fix name ordering. Show series & volume in browser cards if it affects name ordering.
  - Shrink reader page change boxes to let toolbar activate on corner clicks.
- Dev
  - Big lint update.

## v1.4.2

- Fixes
  - Groups were not aggregating children properly when searched.
  - Search could break Folder View.
  - Changing the browser 'Order By' would sometimes not apply.
  - Attempt to fix stale books appearing on reader load.

## v1.4.1

- Fixes
  - A bug that prevented folder view from displaying under some circumstances.

## v1.4.0

- Features
  - Story Arc Top Group in Web & OPDS Browsers.
  - Support multiple Story Arcs per comic.
  - Supports Mylar CSV StoryArc / StoryArcNumber extension to ComicInfo.xml.
  - Show only filter options that affect the current browse level.
  - Reader has a Series/Folder/Story Arc order selector.
  - Reader shows filename instead of metadata title if you've been browsing in File View.
  - Downloads now use the original filename from disk.
- Fix
  - Folder view was displayed but crashed in OPDS even if disabled by admin.

## v1.3.14

- Features
  - Better metadata extraction for PDFs.
  - Support for ComicInfo StoryArcNumber, Review and GTIN tags.
  - Order by Story Arc Number.
  - Do not detect .cbr files if unrar is not on the path.
  - Display filename for comics in browser file view.
- Fixes
  - Import of ComicInfo Tags metadata.
  - Never removed old missing metadata when updated.
  - Error on moving folders.
  - Fix saving last route between sessions.
  - Better error messages if unrar is not on the path.
- Removed
  - Remove support for unrar.cffi.

## v1.3.13

- Fixes
  - Group cover sometimes showing wrong cover for order.
  - Rare import crash.

## v1.3.12

- Features
  - OPDS 2 Last Read link.
- Fixes
  - Books without bookmarks could break parts of the reader.
  - Remove clipboard UI hints when clipboard isn't available.

## v1.3.11

- Features
  - Last Read Order By option for web and OPDS.
  - Some Order By options now have a default descending order.
  - OPDS 1 special top links limited to 100 entries.
- Fix
  - OPDS 1 links did not include filters or order information.
  - OPDS 1 page streaming broke.

## v1.3.10

- Fixes
  - Crash when reading from folder view.

## v1.3.9

- Features
  - Experimental OPDS 2.0 Support.
  - Create all comic covers admin task.
  - Faster Metadata pages for web and OPDS.
- Fixes
  - Two pages mode broken.
  - Credits not imported bug.
  - Failed imports not removed when file removed bug.

## v1.3.8

- Fixes
  - Fix Basic Authentication not enabled for OPDS Cover, Page, and Download views.
  - Tune low memory algorithm slightly lower for memory constrained systems.
- Dev
  - Use a makefile and move most scripts into bin.

## v1.3.7

- Feature
  - Metadata page links to groups to browse to.
- Fixes
  - Crash when moving comics.
  - Container memory limits weren't detected for Linux kernels before 4.5.
  - Reader
    - Horizontal Reader was slow for comics with high page counts.
    - Vertical scroller was not tracking pages in fitTo Width or Orig modes.
  - Validation error detecting child and parent library paths incorrectly.
- Dev
  - Django 4.2.

## v1.3.6

- Fixes
  - Much lower memory tuning. Environment variables control tuning.
  - Possible fix for vertical scroller page tracking for tall images.

## v1.3.5

- Fixes
  - OPDS sorting and filtering broke.
  - Fixed Download URLs for clients that ignore headers, like Chunky.
  - Update Search Index now checks for more missing entries.

## v1.3.4

- Fixes
  - Number out of range errors for issue when search indexing.
  - Total child pages of folders and groups sometimes overcounted, displaying half unread folders.
  - Reader: Vertical Scroll
    - Remove black bottom margin from images.
    - Was loading every page in a comic at once.
    - Page tracking did not work with images larger than viewport width.

## v1.3.3

- Fixes
  - Number out of range errors when search indexing.
  - Possible Search Index Remove Stale and Abort jobs not scheduled properly.
  - OPDS missing entry ids rejected by Panels reader.
  - Downloads had an extra period in the suffix.

## v1.3.2

- Fixes
  - Reader Fit To settings broken.
  - Possible files marked modified too often.

## v1.3.1

- Fixes
  - An import crash in create foreign keys.
  - Admin table dates were always in UTC, so sometimes off by a day.

## v1.3.0

### I remember... my whole life. Everything

- Features
  - Codex stable in 1GB RAM environments. Faster with more.
  - Codex uses unrar-cffi if available. Not required.
  - Browser
    - Navigate to top button.
    - Filter by File Type.
  - OPDS
    - Top links display only at catalog root.
    - Extended metadata moved to alternate links.
  - Admin
    - Search Indexer Remove Stale Records task much faster.
    - Comic import speedups.
    - Fancier sortable admin tables.
    - Removed `max_db_ops` config variable.
- Fixes
  - Reader vertical scroll lost its place in Fit To Width or Orig mode.
  - OPDS downloaded files all had the same name.
  - Search Index
    - More robust against bad data.
    - Some search fields were case sensitive.
  - Admin
    - Graceful shutdown when Docker container stops.
    - Codex was backing up on every startup.
    - Status for batched imports (large imports or low memory) now reflects total instead of single batch.

## v1.2.9

- Features
  - Vertical scroll option for reader.
  - Faster search index removes.
  - Admin Users tab shows last user activity date.
  - OPDS PSE 1.2 extension for Panels `pse:lastReadDate`.
- Fixes
  - Fixed next and previous book keyboard shortcuts.
  - Improved OPDS acquisition page performance by removing more "categories" metadata.

## v1.2.8

- Features
  - Search Index
    - Improved search indexing times.
    - Admin Flag to adjust nightly full optimization.
  - OPDS
    - "Newest Issues" Link replaced by "Recently Added" after user feedback.
- Fixes
  - Volume tags were often not scanned. Recommend using Force Reimport on all libraries.
  - OPDS
    - Fix navigation links not inheriting view settings of current page.
    - Removed populating categories in OPDS to experiment with performance issues.
    - Fix OPDS pse lastRead tag.
  - Block library polling during database updates, fixes reindexing.

## v1.2.7

- Fixes
  - Trap final search index commit errors and try again without merging segments.
  - Fix moving folders assigned no parent folder, displaying them in root.

## v1.2.6

- Fixes
  - Impose memory limits on search index writers.
  - Impose items-before-write limits on the search index writer.
  - Sort comics by path for the reader navigation when in Folder View.
  - Remove inappropriate vertical scroll bars from page images.

## v1.2.5

- Features
  - In Folder View the reader navigates by folder instead of series.
- Fixes
  - OPDS crash when the required 24 hour time setting input was missing.

## v1.2.4

- Features
  - User configurable 24 hour time format.
  - Reader
    - Displays covers as one page even in two page mode.
    - Read in Reverse mode.
    - Keymaps for adjusting page by one page in two page mode.
    - Previous and Next book navigation buttons and keymaps.
- Fixes
  - OPDS:
    - Fix acquisition feed timeouts on large libraries by removing most m2m fields that populated OPDS categories.
    - Fix pagination.
    - Show series name in comic title.
    - Experiment: don't show top links or entry facets on pages > 1.
  - Reader:
    - Two pages mode would skip pages.
    - Next/prev book goes to correct page for Right To Left tagged books.
    - Fix occasional error setting reader settings.
  - Fixed noop poll event happening on comic cover creation.

## v1.2.3

- Fixes
  - Prevent search indexing starting over if it encounters errors.
  - Fix download buttons.
  - Fix admin settings drawer obscuring small screens.
  - Fix scroll bars showing inappropriately on admin tables.
  - Fix OPDS authors having 'i' appended.

## v1.2.2

- Fixes
  - Fix all items removed from search index after update.
  - Speedups to cleaning up search engine ghosts.

## v1.2.1

- Fixes
  - Crash on building a fresh database.
  - Fixed an importer crash when it tried to wait for changing files.
  - Disabling Library Poll prevented manual polling.
  - More explicit Poll Every hints in edit dialog.
  - Repository link didn't open a new window.

## v1.2.0

### What kind of Heaven uses bounty hunters?

- Features
  - Faster and more robust PDF support. Codex no longer depends on the poppler library.
  - LOGLEVEL=VERBOSE deprecated in favor of DEBUG.
  - Stats page API accessible via API key as well as admin login.
- Fixes
  - Some Librarian Status messages would appear never to finish.
- Development
  - The multiprocessing method is now S P A W N 💀 on all platforms.
  - Websockets are now handled by customized Django channels.
  - aioprocessing Queue communicates between librarian and channels.

## v1.1.6

- Fixes
  - Fix rare deletion and recreation of all comics when inodes changed.

## v1.1.5

- Features
  - Admin Stats tab.
  - Libraries can have a poll delay longer than 1 day.
- Fixes
  - Crash when removing comics.
  - Admin Create & Update dialogs would get stuck open on submit.
  - Delete expired and corrupt sessions every night.
  - More liberal touch detection for more devices.

## v1.1.4

- Fixes
  - Multiprocessing speedup for large search engine indexing jobs.
  - Writes search engine data in segments.
  - Search engine segment combiner optimizer runs nightly (and manually).

## v1.1.3

- Fixes
  - Fix some OPDS browsers unable to read comics.

## v1.1.2

- Fixes
  - Fix unable to initialize database on first run.

## v1.1.0

### Whoosh

- Features
  - Switch to Whoosh Search Engine.
  - You may delete `config/xapian_index`.
  - May run on Windows now?
  - Moved db backups to `config/backups`.
  - Backup database before migrations.
- Removed
  - Do not store search history for combobox across sessions.
- Fixes
  - Fix Admin Library folder picker.
  - Uatu does a better job of ignoring device changes.
  - Don't pop out of folder mode on searches.
  - Fix showing error on unable to load comic image.

## v1.0.3

- Features
  - Force update all failed imports admin task.
- Fixes
  - Fix bug where moving folders to a subfolder orphaned folders.
  - Fix id does not exist redirect loop.

## v1.0.2

- Features
  - Support for Deflate64 zip compression algorithm.
- Fixes
  - Fix Failed Imports not retrying import when updated.
  - Make db updates more durable and log possibly problematic comic paths.
  - Discard orphan websocket connections from the connection pool.
  - Fix Admin Status drawer closing at wrong time.

## v1.0.1

- Features
  - Justify order-by field in browser cards.
- Fixes
  - Fixed next book change drawer opening settings drawer.
  - Fixed zero padding on browser card issue numbers.

## v1.0.0

### Vue 3

- Features
  - Removed old django admin pages.
  - Shutdown task for admins.
  - Configure logging with environment variables. See README.
- Fixes
  - Fix displaying error in login dialog.
  - Fix saving community & critical rating filters to session.
  - Fix fit to screen not enlarging pages smaller than screen.
- Developer
  - Frontend is now Vuetify 3 over Vue 3. Using options API.

## v0.14.5

- Fixes
  - Fix crash on decoding some comics metadata charset encoding.

## v0.14.4

- Fixes
  - Fix login not available when AdminFlag Enable Non Users was unset.
  - Fix server PicklingError logging bug.

## v0.14.3

- Fixes
  - Fix root_path configuration.

## v0.14.2

- Fixes
  - Fix Librarian process hanging due to logging deadlock.
  - Fix reader keyboard shortcut help.
  - Fix book change drawer appearing in the middle of books.

## v0.14.1

- Fixes
  - Resolve ties in browser ordering with default comic ordering.
  - Always close book change drawer before reader opens.

## v0.14.0

### Sliding Pages

- Features
  - Animated sliding pages on reader.
  - Comic & PDF pages display loading, rendering and password errors.
- Fixes
  - Filters with compound names were not loading choices.
  - Show only usable filters for current view as filter choices.
  - Allow filtering by None values when None values exist.
  - Handle an iOS bug with downloading pages and comics inside a PWA.
  - Fixed PDF failure to render on load and after changing settings.
  - Login & Change Password dialogs no longer activate Reader shortcuts by accident.

## v0.13.0

### Admin Panel

- Features
  - Single Page Admin Panel.
  - Users may now change their own passwords.
  - OPDS
    - Use facets for known User Agents that support them. Default to using entry links.
    - Gain a Newest Issues facet, a Start top link and a Featured / Oldest Unread link.
    - More metadata tags.
    - Special thanks to @beville for UX research and suggestions.
  - HTTP Basic auth only used for OPDS.
  - Frontend components do lazy loading, should see some speedups.
- Fixes
  - Fixed imprints & volume levels not displaying sometimes.
  - Fix large images & downloads for some OPDS clients.
- Developer
  - API v3 is more restful.
  - /api/v3/ displays API documentation.
  - Vite replaces Vue CLI.
  - Pinia replaces Vuex.
  - Vitest replaces Jest.
  - Django livereload server and debug toolbar removed.

## v0.12.2

- Fixes
  - Fix OPDS downloading & streaming for Chunky Comic Reader.
  - Hack in facets as nav links for Panels & Chunky OPDS readers.

## v0.12.1

- Fixes
  - Disable article ignore on name sort in folder view.
  - Fix browser navigation bug with issues top group.

## v0.12.0

### Syndication

- Features
  - OPDS v1, OPDS Streaming & OPDS Search support.
    - Codex now accepts HTTP Basic authentication.
    - If you run Codex behind a proxy that accepts HTTP Basic credentials that are different than those for Codex, be sure to disable authorization forwarding.
  - Larger browser covers.
  - Sort by name ignores leading articles in 11 languages.
- Fixes
  - Use defusedxml to load xml metadata for safety.
  - Removed process naming. My implementation was prone to instability.

## v0.11.0

### Task Monitor

- Features
  - Librarian tasks in progress appear in the settings side drawer for administrators.
  - Covers are now created on demand by the browser, rather than on import.
  - Browser Read filter.
- Fixes
  - Bookmark progress bar updates in browser after closing book.
  - Metadata web links fix.

## v0.10.10

- Features
  - Reader nav toolbar shows position in series.
- Fixes
  - Fix inability to log in when the Enable Non Users admin flag is unset.
  - Simplify the Admin Library delete confirmation page to prevent an OOM crash.
  - Move controls away from the iPhone notch and home bar.

## v0.10.9

- Fixes
  - Fix null bookmark and count fields in metadata.
  - Fix indeterminate finished state when children have bookmark progress.
  - Fix maintenance running inappropriately on first run, which crashed the xapian database.
  - Fix reader metadata keymap.
- Features
  - Progressive Web App support.
  - Reader "Shrink to" settings replaced by "Fit to".
- Special Thanks
  - To ToxicFrog, who's been finding most of these bugs I'm fixing for a while.

## v0.10.8

- Fixes
  - Fixed reader nav clicks always showing the toolbars.
  - Attempt to fix unwanted browser toolbars when treated as a mobile app.
  - Wait half a second before displaying the reader placeholder spinner.
  - Fix metadata missing search query.
  - Fix metadata cache busting.
- Features
  - Accessibility enhancements for screen readers.

## v0.10.7

- Features
  - Browser tries to scroll to the closed book to keep your place.
- Fixes
  - Fixed missing lower click area on browser cards.
  - Fixed session bookmark interfering with logged in user bookmark.

## v0.10.6

- Broken docker container.

## v0.10.5

- Features
  - Reader shrink to screen setting becomes fit to screen and embiggens small images.
  - Reader changing to the next book now has visual feedback and requires two clicks.
- Fixes
  - Removed vertical scrollbars when Reader shrunk to height.
  - Don't disturb the view when the top group changes from higher to lower.

## v0.10.4

- Fixes
  - Fix double tap for non-iOS touch devices.
- Features
  - Shrink to Screen reader setting.
  - Reader throbber if a page takes longer than a quarter second to load.

## v0.10.3

- Fixes
  - Fix PDF going blank when settings change.
  - Remove vestigial browser scrollbars when they're not needed. Thanks to ToxicFrog.
  - Fix cover cleanup maintenance task.

## v0.10.2

- Fixes
  - URLs dictate the view over the top group. Fixes linking into views.
  - Fix possible cover generation memory leak.
  - Build a deadfall trap for search indexer zombies. Use Offspring's brains as bait.

## v0.10.1

- Fixes
  - Linked old top level comics orphaned by the library folders migration.

## v0.10.0

### Portable Document Format

- Features
  - PDF support. Optional poppler-utils binary package needed to generate PDF cover thumbnails.
  - CBT support. Tarball comic archives.
  - Alphanumeric issue support. Requires rescanning existing comics.
  - Individual top level folders for each library.
  - Don't duplicate folder name in filename sort.
- Fixes
  - Comic file suffixes now matched case insensitively.
  - Finished comics count as 100% complete for bookmark aggregation.
  - Mark all folder descendant comics un/read recursively instead of immediate children.
  - Don't leak library root paths in Folder View for non-admins in the API.
  - Fixed aggregation bug showing inaccurate data when viewing group metadata.
  - More accurate Name sorting.
  - Fixed default start page for RTL comics.
  - Disabled reading links for empty comics.
  - Shield radiation from Venus to reduce zombie incidents.

## v0.9.14

- Fixes
  - Fix comicbox config crash.
  - Use codex config namespace (`~/.config/codex`) so codex doesn't interfere with standalone comicbox configs.
  - Comic issue numbers display to two decimal points instead of using ½ glyphs.
- Features
  - Filename order by option. Disabled if the "Enable Folder View" Admin Flag is off.

## v0.9.13

- Fixes
  - Fix root_path configuration for running codex in url sub-paths.
  - Parse new filename patterns for metadata.
  - Slightly faster comic cover generation.

## v0.9.12

- Fixes
  - Fix setting global reader settings.
  - Fixed reader settings not applying due to caching.
  - Bust reader caches when the library updates.
  - Reader titles smaller and wrap on mobile.
  - Fixed deep linking into the reader.
- Features
  - Disable reader prev/next touch swiping for phone sized browsers.

## v0.9.11

- Fixes
  - Fixed covers not creating on import.
  - Covers update in browser when updated on disk.
  - Create missing covers on startup.
  - Bust browser cache when the library updates.
  - Reader settings were not applying in some cases.
  - Fixed crash updating the latest codex software version from the internet.
  - Fixed crash loading the admin page.
- Features
  - Codex processes show names in ps and thread names on Linux.
  - Add Poll libraries action to the FailedImports Admin Panel.
  - Space and shift-space previous and next reader shortcuts.
  - Reader settings UI redesigned to be clearer.

## v0.9.10

Yanked. Crash loading admin page.

## v0.9.9

- Fixes
  - Fixed combining CBI credits with other format credits.
  - Failed imports notification appears only for new failed imports.
- Features
  - Update search index daily.
  - Clean up orphan comic covers every night.

## v0.9.8

- Fixes
  - Fixed search index update crash while the database was still updating.
  - Fixed a bug with issues larger than 99.
  - Fixed issues not imported due to a metadata cleaning bug.
  - Fixed crash updating search index while the library was still updating.
  - Thread error trapping and diagnostics to root out the zombie process issue.
  - Sort numeric terms in filter menus numerically, not alphabetically.
  - Fixed comic name display wrapping in browser.
- Features
  - More comprehensive metadata sanitizing before import.
  - Reduced time checking to see if files have finished writing before import.
  - Uniform format for metadata parsing logging.
  - Credits sorted by last name.

## v0.9.7

- Fixes
  - Coerce decimal values into valid ranges and precision before importing.
- Features
  - Clean up unused foreign keys once a day instead of after every import.
  - Clean up unused foreign keys librarian job available in the admin panel.

## v0.9.6

- Fixes
  - Don't open the browser when a library changes while reading a comic.
  - Fixed crash creating illegal dates on import.
- Features
  - Replace description field with the more common ComicInfo comments field.
  - Log files now rotate by size instead of daily.
  - Log the path for failed imports and cover creation.

## v0.9.5

- Fixed
  - Use an allow list for importing metadata to prevent crashes.

## v0.9.4

- Fixes
  - Fixed crash when importing comments metadata.

## v0.9.3

- Fixes
  - Import credits data for CBI and CIX tagged comics.
  - More liberal metadata decimal parsing.

## v0.9.2

- Fixes
  - Fix rare migration bug for the Cover Artist role.

## v0.9.1

- Fixes
  - Fix to the library group integrity checker.

## v0.9.0

### Private Libraries

- Features
  - Libraries may have access restricted to certain user groups.
  - The "Critical Rating" tag is now a decimal value.
  - The "Community Rating" tag replaced the "User Rating" tag, a decimal value.
  - Cover Credits role replaced by "Cover Artist".
  - Reader has a "Download Page" button.
  - Metadata dialog highlights filtered items.
  - Metadata dialog is faster.
  - Admin Queue Job for creating missing comic covers.

## v0.8.0

### Search

- Features
  - Metadata search field in browser.
  - Settings dialogs replaced with side drawers.
  - Changed some keyboard shortcuts in reader.
  - "group by" renamed to "top group".
  - Admin panel gained a Queue Jobs page.
- Fixes
  - Browser does a better job of remembering your last browser view on first load.
  - Reader's "close book" button now does a better job returning you to your last browser view.
  - Metadata panel cleanup and fix some missing fields.
- Binary Dependencies
  - Codex now requires the Xapian library to run as a native application.
- Drop Support
  - The linux/armhf platform is no longer published for Docker.
- License
  - Codex is GPLv3.

## v0.7.5

- Fixes
  - Fix integrity cleanup check for old comic_folder relations that prevented migrations.

## v0.7.4

- Fixes
  - Fix integrity cleanup check for more types of integrity errors that may have prevented clean db migrations.
  - Fix last filter, group, sort not loading properly for some new views.

## v0.7.3

- Fixes
  - Fix crash updating latest version.
  - Fix a folder not found crash in folder view.
- Features
  - Database indexing speedups.

## v0.7.2

- Fixes
  - Fix another integrity check bug.

## v0.7.1

- Fixes
  - Fix an integrity check crash that happened with older databases.
- Features
  - Added `CODEX_SKIP_INTEGRITY_CHECK` env var.

## v0.7.0

### Feels Snappier

- Database Migration
  - v0.7.0 changes the database schema. Databases run with v0.7.0+ will not run on previous versions of codex.
- Features
  - Big speed up to importing comics for large imports.
  - Speed up creating comic covers on large imports.
  - Admin Panel options for polling (formerly "scanning") and watching events have changed names.
  - Admin Panel task added to regenerate all comic covers.
  - Browser Admin Menu option added for polling all Libraries on demand.
  - Comics with no specified Publishers, Imprints and Series no longer have induced default names for these, but have no name, like Volumes.
  - Codex repairs database integrity on startup.
  - Codex backs up the database every night.
  - Autodetect server timezone (for logging).
    - Use TZ and TIMEZONE environment variables to explicitly set the server timezone.
  - Added `VERBOSE` logging level to help screen out bulk `DEBUG` messages from dependencies.
  - Truncated logging messages for easier reading.
- Fixes
  - Fixed metadata screen displaying incorrect information.
  - Now compatible with python 3.10.

## v0.6.8

- Fixes
  - Fixes some import bugs with filename parsing when there are no tags.
  - Fixed two page view toggle hotkey.
- Features
  - Browser now tells you what kind of items you're looking at.
  - Reader swiping navigation.
  - Reader keyboard shortcut help dialog.
  - Tentative linux/armhf support. No way for me to test this.
  - Vacuum the sqlite database once a day to prevent bloat.
  - Corrupt database rebuild procedure. See README.

## v0.6.7

- Dark admin pages and fix template overrides.

## v0.6.6

- Automate multi-arch builds.

## v0.6.5

- Build Docker images for amd64 & arm64 manually.

## v0.6.4

- Fix reader bug that only displayed the first page.

## v0.6.3

- Add LOGLEVEL environment variable.
  - Set to DEBUG to see everything.
- Removed DEV environment variable.
- Possible fix for newly imported covers not displaying.

## v0.6.2

- Fixes
  - Fixed intermittent Librarian startup crash in docker.
  - Fixed DEBUG environment variable to be able to run in production.
- Dev
  - Added DEV environment variable for dev environment.

## v0.6.1

- Fixes
  - Fix librarian startup crash. Prevented admin actions from happening.

## v0.6.0

### Better Filtering and Sorting

- Features
  - New Filters.
  - New sort options: Updated Time and Maturity Rating.
  - New frontend URL scheme.
  - New API.
  - Added time to the browse card when sorting by time fields.
  - Browser pagination footer now remains fixed on the page.
  - Browser pagination footer is now a slider to handle larger collections.
  - Notifications now appear in reader as well as browser.
  - On comic import failure, log the path as well as the reason.
  - Codex version information moved to Browser > Settings.
- Fixes
  - Fixed a bug importing Story Arc Series Groups and Genres. Requires re-import to correct.
  - Fixed a bug with sorting that grouped improperly and showed the wrong covers for reverse sorts.
  - Scanning notifications on login not disappearing bug squashed.
  - Fixed a bug where the browser settings menu wouldn't close when opening a dialog.

## v0.5.18

- Fixes
  - Fix filters not changing display bug.

## v0.5.17

- Fixes
  - Fix root_path not parsing bug.

## v0.5.16

- Fixes
  - Fix broken startup when parsing json shared between front and back end.

## v0.5.15

- Features
  - Metadata popup is now faster.
  - Metadata popup now shows created_at, updated_at and path (if admin).
  - Removed numeric and common password validators. Made the minimum length 4.

## v0.5.14

- Features
  - Metadata view for browse containers. Also observes filters.
  - Covers now regenerate on re-import.
- Fixes
  - Fix scanning notification.
  - Fix unable to delete libraries bug.

## v0.5.13

- Features
  - Admin Flag for automatically updating codex.
  - Force updates from the admin panel with an Admin Flag action.
  - Snackbar for notifying about failed imports.

## v0.5.12

- Features
  - Admin page for failed imports.
  - Snackbar tells admins when scans are happening.
  - Report the latest version available in the browser footer tooltip.
  - Admin flag for disabling codex for non-users.

## v0.5.11

- Features
  - Browser rows now adapt to browse tile size.
  - Browser covers for containers now match the data we're sorting by.
  - Serve static files faster.
- Fixes
  - Reader fix: settings for all comics were not setting properly.
  - Fix bookmarks for sessions that aren't logged in.

## v0.5.10

- Fixes
  - Fix filtering bugs.
  - Fix mark read/unread bugs.
  - Fix reader settings not setting properly.
  - Fix reader images positioning.
  - Fix minor crash closing books with uninitialized browser app.

## v0.5.9

- Fixes
  - Fix sorting for folder view.
  - Fix import bugs.
- Features
  - Display sort key value in browse tile.
  - Display a standard image for missing covers.
  - Slightly more helpful 404 page.

## v0.5.8

- Upload mistake with 0.5.7. This is just a version bump.

## v0.5.7

- Fixes
  - Fix import crashes.
  - Remove scan locks on startup.
- Features
  - Allow credits with an empty role.
  - Pagination of large browse results.
  - Center comic pages better.
  - Add download link to browser menu.
  - Log to files as well as console.

## v0.5.6

- Fixes
  - Websocket path security wasn't handling leading slashes well. Skip it.

## v0.5.5

- Fixes
  - Revert to whitenoise 5.1.0, which works with subpaths.

## v0.5.4

- Fixes
  - Fix crash on start if all static dirs do not exist.

## v0.5.3

- Fixes
  - Fixed login bug introduced in v0.5.3 (thanks hubcaps).
  - Fixed filtering bug introduced in v0.5.2.
- Features
  - Versioned API.
  - Toast popup for admins indicating libraries are scanning.
  - Periodic frontend refresh during long scans.
  - Codex version displayed in browser footer.

## v0.5.2

- Features
  - Lazy load filter choices when the menu opens.
  - Documentation moved into the admin panel.
- Fixes
  - Fix multiprocessing for Windows.

## v0.5.1

- Minor bugfixes.

## v0.5.0

### First useful working version

- Productionized alpha release.

## v0.4.0

### Polished UI

- Polished VueJS frontend.

## v0.3.0

### I'm a frontend developer!

- Single Page VueJS frontend PoC without much styling.

## v0.2.0

### It's alive

- Working application with all initial features.
- Django frontend.

## v0.1.0

### Hello world

- Proof of concept.

================================================
FILE: README.md
================================================

# Codex

A comic archive browser and reader.

## 🚨 Announcement 🚨

### Docker

The Docker image has moved to [ghcr.io/ajslater/codex](https://github.com/ajslater/codex/pkgs/container/codex). A final docker.io image has been released on dockerhub.

## ✨ Features

- Codex is a web server.
- Full text search of comic metadata and bookmarks.
- Filter and sort on all comic metadata and unread status per user.
- Browse a tree of Publishers, Imprints, Series, Volumes, or your own folder hierarchy, or by tagged Story Arc.
- Read comics in a variety of aspect ratios and directions that fit your screen.
- Watches the filesystem and automatically imports new or changed comics.
- Anonymous browsing and reading or registered users only, to your preference.
- Per user bookmarking & settings, even before you make an account.
- Private Libraries accessible only to certain groups of users.
- Reads CBZ, CBR, CBT, and PDF formatted comics.
- Syndication with OPDS 1 & 2, streaming, search and authentication.
- Add custom covers to Folders, Publishers, Imprints, Series, and Story Arcs.
- Remote-User HTTP header SSO support.
- Runs in 1GB of RAM, faster with more.
- GPLv3 Licensed.

### Examples

- _Filter by_ Story Arc and Unread, _Order by_ Publish Date to create an event reading list.
- _Filter by_ Unread and _Order by_ Added Time to see your latest unread comics.
- _Search by_ your favorite character to find their appearances across different comics.

## 👀 Demonstration

You may browse a [live demo server](https://demo.codex-reader.app/) to get a feel for Codex.

## 📜 News

Codex has a [NEWS file](NEWS.md) to summarize changes that affect users.

## 🕸️ HTML Docs

[HTML formatted docs are available here](https://codex-comic-reader.readthedocs.io)

## 📦 Installation

### Install & Run with Docker

Run the official [Docker Image](https://github.com/ajslater/codex/pkgs/container/codex) at ghcr.io/ajslater/codex. Read the [Docker instructions](docs/DOCKER.md).

You'll then want to read the [Administration](#administration) section of this document.

### Install & Run on a HomeAssistant server

If you have a [HomeAssistant](https://www.home-assistant.io/) server, Codex can be installed with the following steps:

- Add the `https://github.com/alexbelgium/hassio-addons` repository by [clicking here](https://my.home-assistant.io/redirect/supervisor_add_addon_repository/?repository_url=https%3A%2F%2Fgithub.com%2Falexbelgium%2Fhassio-addons)
- Install the addon: [click here to automatically open the addon store, then install the addon](https://my.home-assistant.io/redirect/supervisor)
- Customize the addon options, then start the add-on.

### Install & Run as a Native Application

You can also run Codex as a natively installed python application with pip.

#### Binary Dependencies

You'll need to install the appropriate system dependencies for your platform before installing Codex.

##### Linux Dependencies

###### Debian Dependencies

...and Ubuntu, Mint, MX, Windows Subsystem for Linux, and others.

```sh
apt install build-essential libimagequant0 libjpeg-turbo8 libopenjp2-7 libssl libyaml-0-2 libtiff6 libwebp7 python3-dev python3-pip sqlite3 unrar zlib1g
```

Versions of packages like libjpeg, libssl, and libtiff may differ between flavors and versions of your distribution. If the package versions listed in the example above are not available, try searching for ones that are with `apt-cache` or `aptitude`.

```sh
apt-cache search libjpeg-turbo
```

###### Alpine Dependencies

```sh
apk add bsd-compat-headers build-base jpeg-dev libffi-dev libwebp openssl-dev sqlite yaml-dev zlib-dev
```

##### Install unrar Runtime Dependency on non-debian Linux

Codex requires unrar to read CBR formatted comic archives.
Unrar is often not packaged for Linux, but here are some instructions: [How to install unrar in Linux](https://www.unixtutorial.org/how-to-install-unrar-in-linux/)

Unrar as packaged for Alpine Linux v3.14 seems to work on Alpine v3.15+.

##### macOS Dependencies

Using [Homebrew](https://brew.sh/):

```sh
brew install jpeg libffi libyaml libzip openssl python sqlite unrar webp
```

#### Installing Codex on Linux on ARM (AARCH64) with Python 3.13

Pymupdf has no pre-built wheels for AARCH64, so pip must build it, and the build fails on Python 3.13 without this environment variable set:

```sh
PYMUPDF_SETUP_PY_LIMITED_API=0 pip install codex
```

You will also have to have the `build-essential` and `python3-dev` or equivalent packages installed on your Linux.

#### Windows Installation

Windows users are encouraged to use Docker to run Codex, but it will also run natively on the Windows Subsystem for Linux. Installation instructions are in the [Native Windows Dependencies Installation Document](docs/WINDOWS.md).

#### Run Codex Natively

Once you have installed codex, the codex binary should be on your path. To start codex, run:

```sh
codex
```

### Use Codex

Once installed and running, you may navigate to Codex at `http://localhost:9810/` (substituting whatever host and port you configured).

## 👑 Administration

### Navigate to the Admin Panel

- Click the hamburger menu ☰ to open the browser settings drawer.
- Log in as the 'admin' user. The default administrator password is also 'admin'.
- Navigate to the Admin Panel by clicking on its link in the browser settings drawer after you have logged in.

### Change the Admin password

The first thing you should do is log in as the admin user and change the admin password.

- Navigate to the Admin Panel as described above.
- Select the Users tab.
- Change the admin user's password using the small lock button.
- You may also change the admin user's name with the edit button.
- You may create other users and grant them admin privileges by making them staff.

### Add Comic Libraries

The second thing you will want to do is log in as an Administrator and add one or more comic libraries.

- Navigate to the Admin Panel as described above.
- Select the Libraries tab in the Admin Panel.
- Add a Library with the "+ LIBRARY" button in the upper left.

### Reset the admin password

If you forget all your superuser passwords, you may restore the original default admin account by running codex with the `CODEX_RESET_ADMIN` environment variable set.

```sh
CODEX_RESET_ADMIN=1 codex
```

or, if using Docker:

```sh
docker run -e CODEX_RESET_ADMIN=1 -v host-parent-dir/config:/config ajslater/codex
```

### Private Libraries

In the Admin Panel you may configure private libraries that are only accessible to specific groups.

A library with _no_ groups is accessible to every user, including anonymous users. A library with _any_ groups is accessible only to users who are in those groups.

Use the Groups admin panel to create groups and the Users admin panel to add and remove users to groups.

#### Include and Exclude Groups

Codex library groups may either exclude certain groups of users, or exclude everyone and include only certain groups of users.

### PDF Metadata

Codex reads PDF metadata from the filename, PDF metadata fields, and also many formats of common complex comic metadata if they are embedded in the PDF `keywords` field.

If you decide to include PDFs in your comic library, I recommend taking time to rename your files so Codex can find some metadata. Codex recognizes several file naming schemes. This one has good results:

`{series} v{volume} #{issue} {title} ({year}) {ignored}.pdf`
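For instance, hypothetical filenames (invented titles, purely illustrative) following this scheme might look like:

```text
Captain Science v1 #001 The Beginning (1950).pdf
Captain Science v1 #002 (1950) [digital].pdf
```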
Complex comic metadata, such as ComicInfo.xml, can also be embedded in the keywords field by using the [comicbox](https://github.com/ajslater/comicbox) command line tool. Codex will read this data because it relies on comicbox internally. Not many people use comicbox or embed metadata in PDFs in this fashion, so you likely won't find it unless you've added it yourself.

### 🗝️ API with Key Access

Codex has a limited number of API endpoints available with API Key Access. The API Key is available on the admin/stats tab.

## 🎛️ Configuration

### Config Dir

The default config directory is `config/` directly under the working directory you run codex from. You may specify an alternate config directory with the environment variable `CODEX_CONFIG_DIR`.

The config directory contains a file named `codex.toml` where you can specify ports and bind addresses. If no `codex.toml` is present, Codex copies a default one to that directory on startup.

e.g.

```toml
[server]
host = "0.0.0.0"
port = 9810
url_path_prefix = ""
```

The config directory also holds the main sqlite database, a Django cache and comic book cover thumbnails.

### Environment Variables

Environment variables override values set in the TOML config file.

#### General

- `TIMEZONE` or `TZ` will explicitly set the timezone in long format (e.g. `"America/Los_Angeles"`). This is useful inside Docker because codex cannot automatically detect the host machine's timezone.
- `DEBUG_TRANSFORM` will show verbose information about how the comicbox library reads all archive metadata sources and transforms them into the comicbox schema.
- `CODEX_CONFIG_DIR` will set the path to the codex config directory. Defaults to `$CWD/config`.

##### Server

- `GRANIAN_HOST` the IP or hostname to serve Codex from. Defaults to "0.0.0.0", all interfaces.
- `GRANIAN_PORT` the port to serve Codex from. Defaults to 9810.
- `GRANIAN_WORKERS` the number of worker processes. 1 is recommended for containerized environments.
- `GRANIAN_HTTP` the HTTP protocol to use: "auto", "1" or "2". Defaults to "auto". Generally you want to serve codex from behind nginx or traefik, which will handle the protocol, even HTTP 3, so this should stay on "auto".
- `GRANIAN_WEBSOCKETS` enables websockets. Required for codex live updates. Defaults to true.
- `GRANIAN_URL_PATH_PREFIX` the HTTP path prefix for codex (e.g. "/codex" for a reverse proxy sub-path). Defaults to "".

##### Repair

- `CODEX_RESET_ADMIN=1` will reset the admin user and its password to defaults when codex starts.
- `CODEX_FIX_FOREIGN_KEYS=1` will check for and try to repair illegal foreign keys on startup.
- `CODEX_INTEGRITY_CHECK=1` will perform a database integrity check on startup.
- `CODEX_FTS_INTEGRITY_CHECK=1` will perform an integrity check on the full text search index.
- `CODEX_FTS_REBUILD=1` will rebuild the full text search index.

#### Logging

- `LOGLEVEL` will change how verbose codex's logging is. Valid values are `CRITICAL`, `ERROR`, `WARNING`, `SUCCESS`, `INFO`, `DEBUG`, and the overly noisy `TRACE`. The default is `INFO`.
- `CODEX_LOG_DIR` sets a custom directory for saving logfiles. Defaults to `$CODEX_CONFIG_DIR/logs`.
- `CODEX_LOG_RETENTION` how long to keep logs. Defaults to "6 months".
- `CODEX_LOG_TO_FILE=0` will not log to files.
- `CODEX_LOG_TO_CONSOLE=0` will not log to the console.

##### Browser

- `CODEX_BROWSER_MAX_OBJ_PER_PAGE` the maximum number of objects per page. Defaults to 100.
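For example, several of the variables documented above can be combined on one command line to launch codex with an explicit timezone, an alternate port, and verbose logging (the specific values here are illustrative):

```sh
TZ="America/Los_Angeles" GRANIAN_PORT=9811 LOGLEVEL=DEBUG codex
```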
#### Throttling

Codex contains some experimental throttling controls. The value supplied to these variables is interpreted as the maximum number of allowed requests per minute. For example, the following settings would limit each described group to 30 requests per minute, i.e. one request every two seconds.

- `CODEX_THROTTLE_ANON=30` Anonymous users
- `CODEX_THROTTLE_USER=30` Authenticated users
- `CODEX_THROTTLE_OPDS=30` The OPDS v1 & v2 APIs (Panels uses this for search)
- `CODEX_THROTTLE_OPENSEARCH=30` The OPDS v1 Opensearch API

#### Authentication

- `CODEX_AUTH_REMOTE_USER` will allow unauthenticated logins with the Remote-User HTTP header. This can be very insecure if not configured properly. Please read the Remote-User docs devoted to it below.

### Reverse Proxy

[nginx](https://nginx.org/) is often used as a TLS terminator and subpath proxy. Here's an example nginx config with a subpath named '/codex':

```nginx
# HTTP
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Forwarded-Port $server_port;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Scheme $scheme;

# Websockets
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "Upgrade";

location /codex {
    proxy_pass http://codex:9810;
    # Codex reads http basic authentication.
    # If the nginx credentials are different than codex credentials use this line to
    # not forward the authorization.
    proxy_set_header Authorization "";
}
```

Specify a reverse proxy sub path (if you have one) in `config/codex.toml`:

```toml
[server]
url_path_prefix = "/codex"
```

#### Nginx Reverse Proxy 502 when container refreshes

Nginx requires a special trick to refresh dns when linked Docker containers are recreated. See this [nginx with dynamic upstreams](https://tenzer.dk/nginx-with-dynamic-upstreams/) article.

#### Single Sign On and Third Party Authentication

##### OAuth & OIDC

Codex is not an OIDC client at this time. However, the following Remote-User and Token Authentication methods may assist other services in providing Single Sign On.

##### Remote-User Authentication

Remote-User authentication tells Codex to accept a username from the webserver and assume that authentication has already been done. This is very insecure if you haven't configured an authenticating reverse proxy in front of Codex.

Here's a snippet for configuring nginx with tinyauth to provide this header. This snippet is incomplete and assumes that the rest of the nginx tinyauth config has been done:

```nginx
auth_request_set $tinyauth_remote_user $upstream_http_remote_user;
proxy_set_header Remote-User $tinyauth_remote_user;
```

⚠️ Only turn on the `CODEX_AUTH_REMOTE_USER` environment variable if your webserver sets the `Remote-User` header itself every time for the Codex location, overriding any malicious client that might set it themselves. ⚠️

##### HTTP Token Authentication

You can also configure your proxy to add token authentication to the headers. Codex will read "Bearer" prefixed authorization tokens. The token is unique for each user and may be found in the Web UI sidebar. You must configure your proxy or single sign on software to send this token.

```nginx
set $user_token 'user-token-taken-from-web-ui';
proxy_set_header Authorization "Bearer $user_token";
```
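You can also verify a token by sending the same header by hand before wiring up the proxy. A minimal sketch, assuming a local server on the default port; the token value is hypothetical and should be copied from the Web UI sidebar:

```sh
# Fetch the API documentation page using Bearer token authentication.
curl -H "Authorization: Bearer user-token-taken-from-web-ui" \
  "http://localhost:9810/api/v3/"
```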
### Restricted Memory Environments

Codex can run with as little as 1GB of available RAM. Large batch jobs, like importing and indexing tens of thousands of comics at once, will run faster the more memory is available to Codex. The biggest gains in speed happen as you increase memory up to about 6GB. Codex batch jobs do get faster with more memory above 6GB, but with diminishing returns.

If you must run Codex in an administratively restricted memory environment, you might want to temporarily give Codex a lot of memory to run a very large import job and then restrict it again for normal operation.

## 📖 Use

### 👤 Sessions & Accounts

Once your administrator has added some comic libraries, you may browse and read comics. Codex will remember your preferences, bookmarks and progress in the browser session. Codex destroys anonymous sessions and bookmarks after 60 days. To preserve these settings across browsers and after sessions expire, you may register an account with a username and password. You will have to contact your administrator to reset your password if you forget it.

### ᯤ OPDS

Codex supports OPDS syndication and OPDS streaming.

You may find the OPDS url in the side drawer. It should take the form:

`http(s)://host.tld(:9810)(/path_prefix)/opds/v1.2/` or
`http(s)://host.tld(:9810)(/path_prefix)/opds/v2.0/`

#### OPDS v1 Clients

- iOS
  - [Panels](https://panels.app/)
  - [PocketBooks](https://pocketbook.ch/)
  - [KYBook 3](http://kybook-reader.com/)
  - [Chunky Comic Reader](https://apps.apple.com/us/app/chunky-comic-reader/id663567628)
- Android
  - [Moon+](https://play.google.com/store/apps/details?id=com.flyersoft.moonreader)
  - [Librera](https://play.google.com/store/apps/details?id=com.foobnix.pdf.reader)

KYBook 3 does not seem to support http basic authentication, so it cannot authenticate with Codex.

#### OPDS v2 Clients

OPDS 2.0 is a newer protocol that is only just starting to be supported by new clients.

- [Stump (Alpha Test)](https://www.stumpapp.dev/guides/mobile/app)
- [Readest](https://readest.com/) (No page streaming yet, download only)

#### OPDS Authentication

##### OPDS Login

The few clients that implement the OPDS 1.0 Authentication spec present the user with a login screen for interactive authentication.

##### HTTP Basic

Some OPDS clients allow configuring HTTP Basic authentication in their OPDS server settings. If they don't, you will have to add your username and password to the URL. In that case the OPDS url will look like:

`http(s)://username:password@codex-server.tld(:9810)(/path_prefix)/opds/v1.2/`

##### HTTP Token

Some clients allow adding a unique login token to the HTTP headers. Codex will read "Bearer" prefixed authorization tokens. The token is unique for each user and may be found in the Web UI sidebar.

#### Supported OPDS Specifications

##### OPDS v1

- [OPDS 1.2](https://specs.opds.io/opds-1.2.html)
- [OPDS-PSE 1.2](https://github.com/anansi-project/opds-pse/blob/master/v1.2.md)
- [OPDS Authentication 1.0](https://drafts.opds.io/authentication-for-opds-1.0.html)

##### OPDS v2

- [OPDS 2.0 (draft)](https://drafts.opds.io/opds-2.0.html)
- [OPDS 2.0 Digital Visual Narratives Profile (DiViNa)](https://github.com/readium/webpub-manifest/blob/master/profiles/divina.md)
- [OPDS 2.0 Authentication (proposal)](https://github.com/opds-community/drafts/discussions/43)
- [OPDS 2.0 Progression (proposal)](https://github.com/opds-community/drafts/discussions/67)

##### OpenSearch v1

- [OpenSearch 1.1 (draft)](https://github.com/dewitt/opensearch)
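As a quick connectivity check, the OPDS v1.2 feed can also be fetched with HTTP Basic credentials from the command line. A hypothetical example, assuming a local server on the default port; substitute your own credentials and host:

```sh
# Fetch the root OPDS v1.2 feed with HTTP Basic authentication.
curl -u username:password "http://localhost:9810/opds/v1.2/"
```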
## [🩺 Troubleshooting](#troubleshooting)

### 📒 Logs

Codex collects its logs in the `config/logs` directory. Take a look to see what the server is doing.

You can change how much codex logs by setting the `LOGLEVEL` environment variable. By default this level is `INFO`. To see more verbose messages, run codex like:

```sh
LOGLEVEL=DEBUG codex
```

### Watching Filesystem Events with Docker

Codex tries to watch for filesystem events to instantly update your comic libraries when they change on disk. But these native filesystem events are not translated between macOS & Windows Docker hosts and the Docker Linux container. If you find that your installation is not updating to filesystem changes instantly, you might try enabling polling for the affected libraries and decreasing the `poll_every` value in the Admin console to a frequency that suits you.

### Emergency Database Repair

If the database becomes corrupt, Codex includes a facility to rebuild the database. Place a file named `rebuild_db` in your Codex config directory like so:

```sh
touch config/rebuild_db
```

Shut down and restart Codex. The next time Codex starts it will back up the existing database and try to rebuild it. The database lives in the config directory as the file `config/db.sqlite3`. If this procedure goes kablooey, you may recover the original database at `config/backups/codex.sqlite3.before-rebuild`. Codex will remove the `rebuild_db` file.

### Warnings to Ignore

#### StreamingHttpResponse Iterator Warning

```pycon
packages/django/http/response.py:517: Warning: StreamingHttpResponse must consume synchronous iterators in order to serve them asynchronously. Use an asynchronous iterator instead.
```

This is a known warning and does not represent anything bad happening. It's an artifact of the Django framework slowly supporting asynchronous server endpoints and unfortunately isn't practical to remove yet.

## 📚 Alternatives to Codex

- [Kavita](https://www.kavitareader.com/) has light metadata filtering/editing, supports comics and eBooks, and has features for manga.
- [Komga](https://komga.org/) has light metadata editing and duplicate page elimination.
- [Ubooquity](https://vaemendis.net/ubooquity/) reads both comics and eBooks.

## 🔧 Popular comic utilities

- [Mylar](https://github.com/mylar3/mylar3) is the best comic book manager, which also has a built in reader.
- [Comictagger](https://github.com/comictagger/comictagger) is a comic metadata editor. It comes with a command line and a desktop GUI. It will tag identified comics from online database sources.
- [Metron Tagger](https://github.com/Metron-Project/metron-tagger) is a command line comic metadata editor. It will tag identified comics from online database sources.
- [Comicbox](https://github.com/ajslater/comicbox) is a powerful command line comic metadata editor and multi metadata format synthesizer. It is what Codex uses under the hood to read comic metadata.

## 🤝 Contributing

### 🐛 Bug Reports

Issues and feature requests are best filed on the [Github issue tracker](https://github.com/ajslater/codex/issues).

## 💬 Support

I and other Codex users answer questions on the [Codex Comic Server Discord](https://discord.gg/CU5kKxv7kg).

### 🛠 Develop

Codex's git repo is mirrored on [Github](https://github.com/ajslater/codex/).

Codex is a Django Python webserver with a VueJS front end.

`/codex/codex/` is the main django app which provides the webserver and database. `/codex/frontend/` is where the vuejs frontend lives.

Most of Codex development is now controlled through the Makefile. Type `make` for a list of commands.
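For example, the two development servers can be started with the Makefile targets that `bin/dev-ttabs.sh` (included later in this extract) invokes, one per terminal:

```sh
# The Vue frontend dev server
make dev-frontend-server
# The Django API server
make dev-server
```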
## 🔗 Links

- [Docker Image](https://github.com/ajslater/codex/pkgs/container/codex)
- [PyPi Package](https://pypi.org/project/codex/)
- [GitHub Project](https://github.com/ajslater/codex/)

## 🙏🏻 Thanks

- Thanks to [Aurélien Mazurie](https://pypi.org/user/ajmazurie/) for allowing me to use the PyPi name 'codex'.
- To [ProfessionalTart](https://github.com/professionaltart) for providing native Windows installation instructions.
- Thanks to the good people of [#mylar](https://github.com/mylar3/mylar3#live-support--conversation) for continuous feedback and comic ecosystem education.

## 😊 Enjoy

![These simple people have managed to tap into the spiritual forces that mystics and yogis spend literal lifetimes seeking. I feel... ...I feel...](docs/strange.jpg)

================================================
FILE: bin/benchmark-opds.sh
================================================

#!/usr/bin/env bash
# benchmark opds url times
set -euo pipefail
BASE_URL="http://localhost:9810"
OPDS_BASE="/opds/v1.2"

timeit() {
  # The function appends the colon, so the label argument should not include one.
  echo "${1}":
  TEST_PATH="${OPDS_BASE}${2}"
  echo -e "\t$TEST_PATH"
  URL="${BASE_URL}${TEST_PATH}"
  /usr/bin/time -h curl -S -s -o /dev/null "$URL"
}

timeit "Recently Added" "/s/0/1?orderBy=created_at&orderReverse=True"
#timeit "All Series" "/r/0/1?topGroup=s"

================================================
FILE: bin/build-choices.sh
================================================

#!/usr/bin/env bash
# Build json choices for frontend using special script.
set -euo pipefail
THIS_DIR="$(dirname "$0")/.."
cd "$THIS_DIR" || exit 1
export PYTHONPATH="${PYTHONPATH:-}:$THIS_DIR"
CHOICES_DIR=frontend/src/choices
# rm -rf "${CHOICES_DIR:?}"/* # breaks vite build
export UV_NO_DEV=1
uv run codex/choices/choices_to_json.py "$CHOICES_DIR"

================================================
FILE: bin/build-dist.sh
================================================

#!/usr/bin/env bash
# Build script for producing a codex python package
set -euxo pipefail
# cd to the repo root; this script lives in bin/ but invokes make and ./bin/pm
cd "$(dirname "$0")/.."
export BUILD=1
make collectstatic
./bin/pm check
echo "*** build and package application ***"
PIP_CACHE_DIR=$(pip3 cache dir)
export PIP_CACHE_DIR
uv build

================================================
FILE: bin/ci-download-dist-if-identical.sh
================================================

#!/usr/bin/env bash
# Download last dist artifacts if current code is identical to the merge source.
set -euo pipefail

PR_DATA=$(gh pr list --state merged --limit 1 --json headRefName,headRefOid \
  --template '{{range .}}{{.headRefName}},{{.headRefOid}}{{end}}')
if [[ -z "$PR_DATA" ]]; then
  echo "No merge detected. Continue with Lint, Test, & Build."
  exit 0
fi
SOURCE_BRANCH="${PR_DATA%,*}"
SOURCE_SHA="${PR_DATA#*,}"
echo "A merge just happened from $SOURCE_BRANCH."
git fetch origin "$SOURCE_BRANCH" --depth=1
if ! git diff --quiet HEAD "origin/$SOURCE_BRANCH"; then
  echo "Code differs from $SOURCE_BRANCH. Continue with Lint, Test, & Build."
  exit 0
fi
echo "Code is identical to $SOURCE_BRANCH"
RUN_ID=$(gh api "repos/${GH_REPO}/actions/runs?head_sha=$SOURCE_SHA&status=success" \
  --jq '[.workflow_runs[] | select(.name=="CI")] | .[0].id')
if [[ -z "$RUN_ID" || "$RUN_ID" == "null" ]]; then
  echo "No successful CI run found for commit $SOURCE_SHA"
  exit 0
fi
echo "Found CI run $RUN_ID for commit $SOURCE_SHA"
if gh run download "$RUN_ID" --name python-dist --dir dist; then
  echo "dist_found=true" >> "$GITHUB_OUTPUT"
  echo "Successfully retrieved dist from run $RUN_ID"
else
  echo "Failed to download python-dist artifact from run $RUN_ID"
fi

================================================
FILE: bin/clean-pycache.sh
================================================

#!/usr/bin/env bash
# remove all pycache dirs
find . -name "__pycache__" -print0 | xargs -0 rm -rf

================================================
FILE: bin/collectstatic.sh
================================================

#!/usr/bin/env bash
# Run the django collectstatic command to collect static files from all
# locations specified in settings.STATIC_DIRS and place them in
# settings.STATIC_ROOT for production builds.
set -euo pipefail
BUILD=1 ./bin/pm collectstatic --clear --no-input --ignore "rest_framework"

================================================
FILE: bin/create-output-dirs.sh
================================================

#!/usr/bin/env bash
# create output directories with correct perms for ci builder docker mounts
# circleci only
set -euo pipefail
mkdir -p -m 777 test-results dist
chown -R circleci:circleci test-results dist

================================================
FILE: bin/delete-files.sh
================================================

#!/usr/bin/env bash
# Delete all files listed in the delete.txt file
set -euo pipefail
DEVENV=$1
DELETE_FILE=$DEVENV/delete.txt
existing_files=()
while IFS= read -r file || [[ -n "$file" ]]; do
  # skip blank lines and comments, and collect only files that still exist
  [[ -z "$file" || "$file" == \#* ]] && continue
  [[ -f "$file" ]] && existing_files+=("$file")
done < "$DELETE_FILE"
echo "Deleting ${#existing_files[@]} files..."
rm -f -- "${existing_files[@]}"

================================================
FILE: bin/dev-docker.sh
================================================

#!/usr/bin/env bash
# Recreate the codex-dev container and enter it with a shell
set -euo pipefail
docker rm -f codex-dev || true
docker compose down
docker compose up codex-dev -d

================================================
FILE: bin/dev-module.sh
================================================

#!/usr/bin/env bash
# Run a main method in an arbitrary module
set -euxo pipefail
THIS_DIR="$(dirname "$0")"
cd "$THIS_DIR" || exit 1
export PYTHONPATH="${PYTHONPATH:-}:$THIS_DIR"
export DEBUG="${DEBUG:-1}"
export PYTHONDEVMODE="$DEBUG"
export PYTHONDONTWRITEBYTECODE=1 #"$DEBUG"
uv run python3 "$@"

================================================
FILE: bin/dev-prod-server.sh
================================================

#!/usr/bin/env bash
# run a production-like server
# THIS_DIR was previously undefined; define it like the other dev scripts
THIS_DIR="$(dirname "$0")/.."
cd "$THIS_DIR" || exit 1
export PYTHONPATH="${PYTHONPATH:-}:$THIS_DIR"
uv run python3 ./codex/run.py

================================================
FILE: bin/dev-reverse-proxy.sh
================================================

#!/usr/bin/env bash
# Run an nginx reverse proxy with a subpath for development testing
set -euo pipefail
cd "$(dirname "$0")/nginx" || exit 1
docker-compose -f nginx.yaml up

================================================
FILE: bin/dev-server.sh
================================================

#!/usr/bin/env bash
# Run the codex server
set -euxo pipefail
THIS_DIR="$(dirname "$0")/.."
cd "$THIS_DIR" || exit 1
export DEBUG="${DEBUG:-1}"
export PYTHONDEBUG=1
export PYTHONDEVMODE="$DEBUG"
export PYTHONDONTWRITEBYTECODE=1
export PYTHONPATH="${PYTHONPATH:-}:$THIS_DIR"
export PYTHONWARNINGS=always
#export CODEX_THROTTLE_OPDS=10
#export CODEX_THROTTLE_USER=10
export DJANGO_SETTINGS_MODULE=codex.settings
#uv run python3 -X tracemalloc ./codex/run.py
#uv run righttyper --all-files --overwrite codex/run.py
uv run python3 ./codex/run.py

================================================
FILE: bin/dev-ttabs.sh
================================================

#!/usr/bin/env bash
# Open development server processes in macOS terminal tabs
# Requires npm ttab
set -euo pipefail
# The Vue dev server
ttab -t "Codex Vue" "make dev-frontend-server"
# The API server
make dev-server

================================================
FILE: bin/docker-compose-exit.sh
================================================

#!/usr/bin/env bash
# Run a docker compose service and return its exit code
set -euo pipefail
SERVICE=$1
# docker compose without the dash doesn't have the exit-code-from param
docker compose up --exit-code-from "$SERVICE" "$SERVICE"

================================================
FILE: bin/docker-tag-latest.sh
================================================

#!/usr/bin/env bash
# Tag old version as latest
set -euo pipefail
if [[ "$#" -lt 3 ]]; then
  echo "Usage: $0 <registry> <image-name> <source-tag>"
  echo "Example: $0 ghcr.io ajslater/codex 1.10.3"
  exit 1
fi

# Configuration
REGISTRY=$1
IMAGE_NAME=$2
SOURCE_TAG=$3
TARGET_TAG="latest"

# Ensure DOCKER_PASS and DOCKER_USER are set in your environment
if [[ -z "$DOCKER_PASS" || -z "$DOCKER_USER" ]]; then
  echo "Error: DOCKER_PASS and DOCKER_USER environment variables must be set."
  exit 1
fi

# 1. Log in to registry
echo "Logging in to $REGISTRY..."
echo "$DOCKER_PASS" | docker login "$REGISTRY" -u "$DOCKER_USER" --password-stdin
# 2. Retag the multi-arch image
# This creates a new manifest on the registry side without downloading image layers
echo "Tagging $IMAGE_NAME:$SOURCE_TAG as $TARGET_TAG..."
docker buildx imagetools create \
  --tag "$REGISTRY/$IMAGE_NAME:$TARGET_TAG" \
  "$REGISTRY/$IMAGE_NAME:$SOURCE_TAG"

# shellcheck disable=SC2181
if [ $? -eq 0 ]; then
  echo "Successfully updated $TARGET_TAG"
else
  echo "Failed to update tag."
  exit 1
fi

================================================
FILE: bin/fix-docker.sh
================================================

#!/usr/bin/env bash
# Fix common linting errors with docker
set -euxo pipefail

#######################
###### Dockerfile #####
#######################
mapfile -t dockerfiles < <(find . -type f -name '*Dockerfile' -print -quit)
if [ ${#dockerfiles[@]} -gt 0 ]; then
  dockerfmt --write "${dockerfiles[@]}"
fi

================================================
FILE: bin/fix-python.sh
================================================

#!/usr/bin/env bash
# Fix common linting errors
set -euxo pipefail
# Python
uv run --group lint ruff check --fix .
uv run --group lint ruff format .

================================================
FILE: bin/fix.sh
================================================

#!/usr/bin/env bash
# Fix common linting errors
set -euxo pipefail

#####################
###### Makefile #####
#####################
uv run mbake format Makefile cfg/*.mk

################
# Ignore files #
################
bin/sort-ignore.sh

############################################
##### Javascript, JSON, Markdown, YAML #####
############################################
bun run fix

###################
###### Shell ######
###################
shellharden --replace ./**/*.sh

================================================
FILE: bin/icons_transform.py
================================================

#!/usr/bin/env python
"""Generate production icons from svg sources."""

import shutil
import subprocess
from pathlib import Path
from types import MappingProxyType

from cairosvg import svg2png

TOP_PATH = Path(__file__).parent.parent
SRC_IMG_PATH = TOP_PATH / Path("img")
STATIC_IMG_PATH = TOP_PATH / Path("codex/static_src/img")
INKSCAPE_PATH = Path("/Applications/Inkscape.app/Contents/MacOS/inkscape")
_COVER_RATIO = 1.5372233400402415
ICONS = MappingProxyType(
    {
        "logo": (32, 32),
        "logo-maskable": (180, 180),
        "missing-cover": (165, round(165 * _COVER_RATIO)),
        "publisher": (),
        "imprint": (),
        "series": (),
        "volume": (),
        "folder": (),
        "story-arc": (),
    }
)


def create_maskable_icon(input_path):
    """Create logo-maskable.svg from logo.svg by editing the XML."""
    with input_path.open("r") as f:
        lines = f.readlines()
    modified_lines = []
    for line in lines:
        modified_lines.append(line)
        if 'inkscape:label="logo"' in line:
            modified_lines.append(' transform="matrix(0.80,0,0,0.80,51.5,51.5)"')
    output_path = SRC_IMG_PATH / "logo-maskable.svg"
    with output_path.open("w") as f:
        f.writelines(modified_lines)


def inkscape(input_path, export_path, width, height):
    """Transform svgs with xlinks into pngs."""
    # Needed because cairosvg doesn't support xlinks
    # https://github.com/Kozea/CairoSVG/issues/163
    args = (
        INKSCAPE_PATH,
        f"--export-width={width}",
        f"--export-height={height}",
        f"--export-filename={export_path}",
        input_path,
    )
    subprocess.run(args, check=False)  # noqa: S603


def transform_icon(name, size):
    """Transform svgs into optimized svgs and pngs."""
    svg_name = name + ".svg"
    input_svg_path = SRC_IMG_PATH / svg_name
    output_svg_path = STATIC_IMG_PATH / svg_name
    input_svg_mtime = input_svg_path.stat().st_mtime
    do_gen_svg = (
        not output_svg_path.exists()
        or output_svg_path.stat().st_mtime < input_svg_mtime
    )
    if do_gen_svg:
        if name == "logo":
            create_maskable_icon(input_svg_path)
            (SRC_IMG_PATH / "missing-cover.svg").touch()
        shutil.copy(input_svg_path, output_svg_path)
    if not size:
        return
    width, height = size
    output_png_name = f"{name}-{width}"
    output_png_path = STATIC_IMG_PATH / (output_png_name + ".png")
    output_webp_path = STATIC_IMG_PATH / (output_png_name + ".webp")
    do_gen_png = (
        not output_webp_path.exists()
        or output_webp_path.stat().st_mtime < input_svg_mtime
    )
    if do_gen_png:
        if name == "missing-cover":
            inkscape(input_svg_path, output_png_path, width, height)
        else:
            svg2png(
                url=str(input_svg_path),
                write_to=str(output_png_path),
                output_width=width,
                output_height=height,
            )


def picopt():
    """Optimize output with picopt."""
    args = ("picopt", "-rtx", "SVG", "-c", "WEBP", STATIC_IMG_PATH)
    subprocess.run(args, check=False)  # noqa: S603


def main():
    """Create all icons."""
    for name, size in ICONS.items():
        transform_icon(name, size)
    picopt()


if __name__ == "__main__":
    main()

================================================
FILE: bin/kill-codex.sh
================================================

#!/usr/bin/env bash
# kill all codex processes
set -euo pipefail
pkill -9 -f 'codex/run.py'

================================================
FILE: bin/kill-eslint_d.sh
================================================

#!/usr/bin/env bash
# eslint_d can get into a bad state if git switches branches underneath it
bunx eslint_d stop
pkill eslint_d
rm -f .eslintcache

================================================
FILE: bin/lint-ci.sh
================================================

#!/usr/bin/env bash
# Lint checks for ci
set -euxo pipefail
if [ "$(uname)" != "Darwin" ]; then
  exit 0
fi
if [ -f .github/workflows/ci.yml ]; then
  actionlint .github/workflows/ci.yml
fi
if [ -f .circleci/config.yml ]; then
  circleci config validate .circleci/config.yml
fi

================================================
FILE: bin/lint-complexity.sh
================================================

#!/usr/bin/env bash
# Lint complexity
set -euo pipefail
if [ "$(uname)" != "Darwin" ]; then
  exit 0
fi
uv run --group lint complexipy
uv run --group lint radon mi --min B .
uv run --group lint radon cc --min C .

================================================
FILE: bin/lint-darwin.sh
================================================

#!/usr/bin/env bash
# Lint checks
set -euxo pipefail
if [ "$(uname)" != "Darwin" ]; then
  exit 0
fi
shellharden --check ./**/*.sh
# subdirs aren't copied into docker builder
# .env files aren't copied into docker
shellcheck --external-sources ./**/*.sh

================================================
FILE: bin/lint-docker.sh
================================================

#!/usr/bin/env bash
# Lint checks for docker
set -euxo pipefail
if [ "$(uname)" != "Darwin" ]; then
  exit 0
fi
mapfile -t dockerfiles < <(find . -type f -name '*Dockerfile' -print -quit)
if [ ${#dockerfiles[@]} -gt 0 ]; then
  hadolint "${dockerfiles[@]}"
  dockerfmt --check "${dockerfiles[@]}"
fi

================================================
FILE: bin/lint-python.sh
================================================

#!/usr/bin/env bash
# Lint checks
set -euxo pipefail

####################
###### Python ######
####################
uv run --group lint ruff check .
uv run --group lint ruff format --check .
make typecheck
uv run --group lint vulture .
bin/lint-complexity.sh
uv run --group lint codespell .
================================================
FILE: bin/lint.sh
================================================

#!/usr/bin/env bash
# Lint checks
set -euxo pipefail
uv run mbake validate Makefile cfg/*.mk
# Javascript, JSON, Markdown, YAML #####
bun run lint
bin/lint-darwin.sh
uv run bin/roman.py -i .prettierignore .

================================================
FILE: bin/localize-db.sh
================================================

#!/usr/bin/env bash
# copy old database and localize it
set -euo pipefail
REMOTE_DB=$1
LOCAL_LIB_PATH=$2
ROOT_PATH=$(realpath "$(dirname "$0")/..")
DB_PATH=$ROOT_PATH/config/codex.sqlite3
SQL_PATH=$(dirname "$0")/localize_library.sql
rm -f "$DB_PATH"-*
cp "$REMOTE_DB" "$DB_PATH"
SQL_DECLARE="DECLARE @LOCAL_LIB_PATH VARCHAR; SET @LOCAL_LIB_PATH = '$LOCAL_LIB_PATH';"
# read the SQL file as input (process substitution here would try to execute it)
echo "$SQL_DECLARE" | cat - "$SQL_PATH" | sqlite "$DB_PATH"

================================================
FILE: bin/localize_library.sql
================================================

UPDATE codex_library SET path = REPLACE(path, '/comics', @LOCAL_LIB_PATH) WHERE path LIKE '/comics%';
UPDATE codex_failedimport SET path = REPLACE(path, '/comics', @LOCAL_LIB_PATH) WHERE path LIKE '/comics%';
UPDATE codex_comic SET path = REPLACE(path, '/comics', @LOCAL_LIB_PATH) WHERE path LIKE '/comics%';

================================================
FILE: bin/manage.py
================================================

#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""

import os
import sys


def main():
    """Run the server."""
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "codex.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        reason = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(reason) from exc
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()

================================================
FILE: bin/pm
================================================

#!/usr/bin/env bash
# Convenience script for running django manage tasks with uv
set -euo pipefail
export PYTHONPATH=.
uv run python3 bin/manage.py "$@"

================================================
FILE: bin/prettier-nginx.sh
================================================

#!/usr/bin/env bash
# Run prettier on nginx files because overrides doesn't work yet.
set -euxo pipefail
CONFIG_DIR=nginx/http.d
if [ -d "$CONFIG_DIR" ]; then
  prettier --parser nginx "$CONFIG_DIR/*.conf" "$@"
fi

================================================
FILE: bin/roman.py
================================================

#!/usr/bin/env python3
"""
Check shell scripts recursively for a descriptive comment on line 2.

Detects shell scripts by shebang.

Inspired by @defunctzombie
"""

from __future__ import annotations

import re
import sys
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
from pathlib import Path
from typing import TYPE_CHECKING

from pathspec import PathSpec

if TYPE_CHECKING:
    from collections.abc import Generator, Sequence

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
SHELL_SHEBANG_PATTERN: re.Pattern[str] = re.compile(r"^#!.*sh")
# Patterns that are always excluded regardless of an ignore file.
DEFAULT_EXCLUDE_PATTERNS: list[str] = [
    ".*",  # hidden files / directories
    "*~",  # editor backup files
]
COMMENT_PATTERN: re.Pattern[str] = re.compile(r"^#+.{4}")
# Number of bytes to read when sniffing the shebang — avoids loading huge
# binary files into memory.
SHEBANG_READ_BYTES: int = 512

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def build_ignore_spec(ignore_path: Path | None) -> PathSpec:
    """Return a gitignore style PathSpec built from *ignore_path* plus the built-in defaults."""
    lines: list[str] = list(DEFAULT_EXCLUDE_PATTERNS)
    if ignore_path is not None:
        lines += ignore_path.read_text(encoding="utf-8").splitlines()
    return PathSpec.from_lines("gitwildmatch", lines)


def read_first_two_lines(path: Path) -> tuple[str, str]:
    """Return the first two lines of *path* as a (line1, line2) tuple."""
    try:
        raw = path.read_bytes()[:SHEBANG_READ_BYTES]
        text = raw.decode("utf-8", errors="replace")
    except OSError:
        return "", ""
    lines = text.splitlines()
    line1 = lines[0] if len(lines) > 0 else ""
    line2 = lines[1] if len(lines) > 1 else ""
    return line1, line2


def is_shell_script(line1: str) -> bool:
    """Return True when *line1* looks like a shell shebang."""
    return bool(SHELL_SHEBANG_PATTERN.search(line1))


def has_description_comment(line2: str) -> bool:
    """Return True when *line2* starts with a ``# `` comment."""
    return bool(COMMENT_PATTERN.match(line2))


def iter_files(path_strs: Sequence[str], spec: PathSpec) -> Generator[Path]:
    """
    Yield every file under *roots* that is not excluded by *spec*.

    Each candidate path is tested relative to the root it was found under
    so that gitignore-style directory patterns (e.g. ``vendor/``) work
    correctly.
    """
    for path_str in path_strs:
        path = Path(path_str)
        if not path.exists():
            print(f"👎 Path does not exist: {path}", file=sys.stderr)  # noqa: T201
            sys.exit(2)
        root = Path(path).resolve()
        if root.is_file():
            rel = Path(root.name)
            if not spec.match_file(str(rel)):
                yield root
            continue
        for sub_path in sorted(root.rglob("*")):
            if not sub_path.is_file():
                continue
            try:
                rel = sub_path.relative_to(root)
            except ValueError:
                rel = sub_path
            # Match against each component so directory patterns work
            if spec.match_file(str(rel)):
                continue
            yield sub_path


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------


def build_parser() -> ArgumentParser:
    """Build cli arg parser."""
    parser = ArgumentParser(
        description="Find shell scripts that are missing a descriptive comment on line 2.",
        formatter_class=RawDescriptionHelpFormatter,
        epilog=(
            "Exit status: 0 if all shell scripts pass, 1 if any are missing\n"
            "a comment on line 2, 2 on usage / IO errors."
        ),
    )
    parser.add_argument(
        "paths",
        nargs="+",
        metavar="PATH",
        help="Files or directories to examine.",
    )
    parser.add_argument(
        "-i",
        "--ignore-file",
        metavar="FILE",
        help="Ignore-file with gitignore-style patterns"
def iter_files(path_strs: Sequence[str], spec: PathSpec) -> Generator[Path]:
    """
    Yield every file under *path_strs* that is not excluded by *spec*.

    Each candidate path is tested relative to the root it was found under
    so that gitignore-style directory patterns (e.g. ``vendor/``) work
    correctly.
    """
    for path_str in path_strs:
        path = Path(path_str)
        if not path.exists():
            print(f"👎 Path does not exist: {path}", file=sys.stderr)  # noqa: T201
            sys.exit(2)
        root = Path(path).resolve()
        if root.is_file():
            rel = Path(root.name)
            if not spec.match_file(str(rel)):
                yield root
            continue
        for sub_path in sorted(root.rglob("*")):
            if not sub_path.is_file():
                continue
            try:
                rel = sub_path.relative_to(root)
            except ValueError:
                rel = sub_path
            # Match the root-relative path so directory patterns work
            if spec.match_file(str(rel)):
                continue
            yield sub_path


# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------
def build_parser() -> ArgumentParser:
    """Build cli arg parser."""
    parser = ArgumentParser(
        description="Find shell scripts that are missing a descriptive comment on line 2.",
        formatter_class=RawDescriptionHelpFormatter,
        epilog=(
            "Exit status: 0 if all shell scripts pass, 1 if any are missing\n"
            "a comment on line 2, 2 on usage / IO errors."
        ),
    )
    parser.add_argument(
        "paths",
        nargs="+",
        metavar="PATH",
        help="Files or directories to examine.",
    )
    parser.add_argument(
        "-i",
        "--ignore-file",
        metavar="FILE",
        help="Ignore-file with gitignore-style patterns (e.g. .zombieignore).",
    )
    return parser


def _parse_ignore_file(args: Namespace) -> PathSpec:
    ignore_path: Path | None = None
    if args.ignore_file:
        ignore_path = Path(args.ignore_file)
        if not ignore_path.is_file():
            print(f"👎 Can't read ignore file: {ignore_path}", file=sys.stderr)  # noqa: T201
            sys.exit(2)
    try:
        return build_ignore_spec(ignore_path)
    except OSError as exc:
        print(f"👎 Failed to parse ignore file: {exc}", file=sys.stderr)  # noqa: T201
        sys.exit(2)


def main() -> None:
    """Run program."""
    parser = build_parser()
    args = parser.parse_args()
    spec = _parse_ignore_file(args)
    offenders: list[Path] = []
    for path in iter_files(args.paths, spec):
        line1, line2 = read_first_two_lines(path)
        if not is_shell_script(line1):
            continue
        if not has_description_comment(line2):
            print(f"🔪 {path}")  # noqa: T201
            offenders.append(path)
    if offenders:
        print(  # noqa: T201
            f"\n{len(offenders)} script(s) missing a description comment.",
            file=sys.stderr,
        )
        sys.exit(1)
    print("👍")  # noqa: T201


if __name__ == "__main__":
    main()

================================================
FILE: bin/sort-ignore.sh
================================================
#!/usr/bin/env bash
# Sort all ignore files in place and remove duplicates
set -euo pipefail
# Set locale to make output deterministic across shells
export LC_ALL=en_US.UTF-8
for f in .*ignore; do
    if [ ! -L "$f" ]; then
        sort --mmap --unique --output="$f" "$f"
        echo "$f" sorted
    fi
done

================================================
FILE: bin/test-python.sh
================================================
#!/usr/bin/env bash
# Run all tests
set -euxo pipefail
mkdir -p test-results
# LOGLEVEL=DEBUG uv run --group test righttyper --all-files --overwrite --output-files --python-version 3.10 -m pytest "$@"
LOGLEVEL=DEBUG uv run --group test pytest "$@"
# pytest-cov leaves .coverage.$HOST.$PID.$RAND files around while coverage itself doesn't
uv run --group test coverage erase || true

================================================
FILE: bin/uml.sh
================================================
#!/usr/bin/env bash
# Create UML diagram
set -euo pipefail
PACKAGE=$(uv run toml get --toml-path=pyproject.toml project.name)
uvx --from pylint pyreverse -o png "$PACKAGE"

================================================
FILE: bin/update-deps-node.sh
================================================
#!/usr/bin/env bash
# Update bun dependencies
set -euo pipefail
bun update
bun outdated || true

================================================
FILE: bin/update-deps-python.sh
================================================
#!/usr/bin/env bash
# Update python dependencies
set -euo pipefail
uv sync --no-install-project --all-extras --all-groups --all-packages --upgrade
uv tree --all-groups --depth 1 --upgrade --outdated | grep --color=always "(latest:.*)" || true

================================================
FILE: bin/vendor-diff-package.sh
================================================
#!/usr/bin/env bash
# Find the diffs for two vendored packages.
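# Usage sketch (hypothetical arguments; PKG is the vendored distribution name
# and MODULE its top-level module directory under codex/_vendor):
#   bin/vendor-diff-package.sh comicbox comicbox
# The resulting diff lands in codex/_vendor/comicbox.diff.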
# Vendors the pristine upstream package into /tmp/_vendor_orig, then diffs it
# against the patched copy in codex/_vendor and records the result.
set -euo pipefail
PKG=$1
MODULE=$2
VENDOR_TARGET=/tmp/_vendor_orig
rm -rf "$VENDOR_TARGET"
mkdir -p "$VENDOR_TARGET"
cd cache
# vendorize the original in a tmp dir
cat << EOT > vendorize.toml
target = "$VENDOR_TARGET"
packages = [
  "$PKG"
]
EOT
uv run python-vendorize
# compare
DIFF_FN="../codex/_vendor/$PKG.diff"
echo "# Non automated/import patches to $PKG" > "$DIFF_FN"
diff --minimal --recursive --suppress-common-lines \
    -x "*~" \
    -x "*.pyc" \
    -x "*__pycache__*" \
    "$VENDOR_TARGET/$MODULE" \
    "../codex/_vendor/$MODULE" \
    | rg -v "Binary|Only" >> "$DIFF_FN"
# cleanup
rm -rf "$VENDOR_TARGET"
rm -f vendorize.toml

================================================
FILE: bin/vendor-patch-imports.sh
================================================
#!/usr/bin/env bash
# Replace relative imports with direct vendor imports
set -euo pipefail
MODULE=$1
MODULE_DIR="codex/_vendor/$MODULE"
find "$MODULE_DIR" -name "*.py" -print0 \
    | xargs -0 sed -ri '' "s/from \.+$MODULE/from codex._vendor.$MODULE/g"

================================================
FILE: bin/version-node.sh
================================================
#!/usr/bin/env bash
# Get version or set version in Frontend & API.
set -euo pipefail
VERSION="${1:-}"
if [ "$VERSION" = "" ]; then
    if [ -d frontend ]; then
        cd frontend
        node -e "const {name, version} = require('./package.json'); console.log(name, version);"
    fi
else
    if [ -d frontend ]; then
        cd frontend
        bunx npm version --allow-same-version "$VERSION"
    fi
fi

================================================
FILE: bin/version-python.sh
================================================
#!/usr/bin/env bash
# Get version or set version for python.
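# Usage sketch:
#   bin/version-python.sh          # print the current version via `uv version`
#   bin/version-python.sh 1.2.3    # set the version (1.2.3 is a hypothetical value)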
set -euo pipefail
VERSION="${1:-}"
if [ "$VERSION" = "" ]; then
    uv version
else
    uv version "$VERSION"
fi

================================================
FILE: cfg/ci.mk
================================================
DEVENV_CI := 1
export DEVENV_CI

.PHONY: lint
## Lint ci errors
## @category Lint
lint::
	bin/lint-ci.sh

================================================
FILE: cfg/codex.mk
================================================
.PHONY: install
## Configure wheel building for Darwin
## @category Install
install::
	BREW_PREFIX=$$(brew --prefix); \
	export LDFLAGS="-L$${BREW_PREFIX}/opt/openssl@3/lib"; \
	export CPPFLAGS="-I$${BREW_PREFIX}/opt/openssl@3/include"; \
	export PKG_CONFIG_PATH="$${BREW_PREFIX}/opt/openssl@3/lib/pkgconfig"

.PHONY: test-frontend
## Run frontend test with dependencies
## @category Test
test-frontend:: build-choices

.PHONY: dev-ttabs
## Run the vite dev frontend and dev-server in ttabs
## @category Run Server
dev-ttabs:
	./bin/dev-ttabs.sh

.PHONY: dev-reverse-proxy
## Run an nginx reverse proxy to codex in docker
## @category Run Server
dev-reverse-proxy:
	./bin/dev-reverse-proxy.sh

## Module to run
## @category Run Server
M :=

.PHONY: dev-module
## Run a single codex module in dev mode
## @category Run Server
dev-module:
	./bin/dev-module.sh $(M)

.PHONY: build-choices
## Build JSON choices for frontend
## @category Build
build-choices:
	./bin/build-choices.sh

.PHONY: build-icons
## Build all icons from source
## @category Build
build-icons:
	uv run --group build bin/icons_transform.py

.PHONY: build
## Build codex dependencies
## @category Build
build:: build-choices build-icons

================================================
FILE: cfg/common.mk
================================================
SHELL := /usr/bin/env bash
DEVENV_SRC ?= ../devenv
# export DEVENV_SRC
DEVENV_COMMON := 1
export DEVENV_COMMON

.PHONY: clean
## Clean caches
## @category Clean
clean::
	rm -rf .*cache

.PHONY: update-devenv
## Update development environment
## @category Update
update-devenv:
	$(DEVENV_SRC)/scripts/update_devenv.py

.PHONY: fix
## Fix lint errors
## @category Fix
fix::
	./bin/fix.sh

.PHONY: lint
## Lint
## @category Lint
lint::
	./bin/lint.sh

.PHONY: news
## Show recent NEWS
## @category Deploy
news:
	head -40 NEWS.md

================================================
FILE: cfg/django.mk
================================================
DEVENV_DJANGO := 1
export DEVENV_DJANGO

.PHONY: fix
## Fix django lint errors in templates
## @category Fix
fix::
	uv run --group lint djlint --reformat **/templates/**/*.html

.PHONY: lint
## Lint django templates
## @category Lint
lint::
	uv run --group lint djlint --lint **/templates/**/*.html

.PHONY: django-check
## Django check
## @category Test
django-check:
	bin/pm check

.PHONY: dev-server
## Run the dev webserver
## @category Serve
dev-server:
	./bin/dev-server.sh

.PHONY: dev-prod-server
## Run a bundled production webserver
## @category Run Server
dev-prod-server: build-frontend collectstatic
	./bin/dev-prod-server.sh

.PHONY: collectstatic
## Collect static files for django
## @category Build
collectstatic: build-icons build-frontend
	bin/collectstatic.sh

.PHONY: build-only
## Build python package
## @category Build
build-only:
	uv build

.PHONY: build
## Build python package
## @category Build
build:: collectstatic build-only

================================================
FILE: cfg/docker.mk
================================================
DEVENV_DOCKER := 1
export DEVENV_DOCKER

.PHONY: fix
## Fix docker lint errors
## @category Fix
fix::
	bin/fix-docker.sh
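
# Double-colon rules like fix:: and lint:: let each included cfg/*.mk fragment
# contribute its own independent recipe to the same shared target name.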
.PHONY: lint
## Lint docker files
## @category Lint
lint::
	bin/lint-docker.sh

================================================
FILE: cfg/docs.mk
================================================
DEVENV_DOCS := 1
export DEVENV_DOCS

.PHONY: docs
## Build doc site
## @category Docs
docs:
	uv run --only-group docs --no-dev mkdocs build --strict --site-dir docs/site

.PHONY: docs-server
## Run the docs server
## @category Docs
docs-server:
	uv run --only-group docs --no-dev mkdocs serve --open --dirty

================================================
FILE: cfg/eslint.config.base.js
================================================
import eslintJs from "@eslint/js";
import eslintJson from "@eslint/json";
import eslintPluginComments from "@eslint-community/eslint-plugin-eslint-comments/configs";
import eslintPluginStylistic from "@stylistic/eslint-plugin";
import { defineConfig } from "eslint/config";
import eslintConfigPrettier from "eslint-config-prettier";
import eslintPluginCompat from "eslint-plugin-compat";
import eslintPluginDeMorgan from "eslint-plugin-de-morgan";
import eslintPluginDepend from "eslint-plugin-depend";
import eslintPluginHtml from "eslint-plugin-html";
import eslintPluginImport from "eslint-plugin-import-x";
import eslintPluginMath from "eslint-plugin-math";
import * as eslintPluginMdx from "eslint-plugin-mdx";
import eslintPluginNoSecrets from "eslint-plugin-no-secrets";
import eslintPluginNoUnsanitized from "eslint-plugin-no-unsanitized";
import eslintPluginNoUseExtendNative from "eslint-plugin-no-use-extend-native";
import eslintPluginPerfectionist from "eslint-plugin-perfectionist";
import eslintPluginPrettierRecommended from "eslint-plugin-prettier/recommended";
import eslintPluginPromise from "eslint-plugin-promise";
import eslintPluginRegexp from "eslint-plugin-regexp";
import eslintPluginSecurity from "eslint-plugin-security";
import eslintPluginSonarjs from "eslint-plugin-sonarjs";
import eslintPluginToml from "eslint-plugin-toml";
import eslintPluginUnicorn from "eslint-plugin-unicorn";
import eslintPluginYml from "eslint-plugin-yml";
import globals from "globals";

export const FLAT_ALL = "flat/all";
export const FLAT_RECOMMENDED = "flat/recommended";
export const CONFIGS = {
  js: {
    ...eslintJs.configs.recommended,
    ...eslintPluginComments.recommended,
    ...eslintPluginCompat.configs[FLAT_RECOMMENDED],
    ...eslintPluginDeMorgan.configs.recommended,
    ...eslintPluginDepend.configs[FLAT_RECOMMENDED],
    ...eslintPluginImport.flatConfigs.all,
    ...eslintPluginMath.configs.recommended,
    ...eslintPluginNoUnsanitized.configs.recommended,
    ...eslintPluginPerfectionist.configs["recommended-natural"],
    ...eslintPluginPromise.configs[FLAT_ALL],
    ...eslintPluginRegexp.configs.all,
    ...eslintPluginSonarjs.configs.all,
    ...eslintPluginUnicorn.configs.all,
    plugins: {
      depend: eslintPluginDepend,
      sonarjs: eslintPluginSonarjs,
      unicorn: eslintPluginUnicorn,
    },
    languageOptions: {
      ecmaVersion: "latest",
    },
    rules: {
      "@stylistic/multiline-comment-style": "off", // Multiple bugs with this rule
      // "import-x/order": "off",
      "max-params": ["warn", 4],
      "no-console": "warn",
      "no-debugger": "warn",
      "no-secrets/no-secrets": "error",
      "security/detect-object-injection": "off",
    },
  },
};
Object.freeze(CONFIGS);

export default defineConfig([
  {
    name: "globalIgnores",
    ignores: [
      "!.circleci",
      "**/*min.css",
      "**/*min.js",
      "**/__pycache__/",
      "**/node_modules/",
      "**/package-lock.json",
      "*~",
      ".claude",
      ".git/",
      ".*cache/",
      ".venv/",
      "dist/",
      "uv.lock",
      "test-results/",
      "typings/",
    ],
  },
  eslintPluginNoUseExtendNative.configs.recommended,
  eslintPluginSecurity.configs.recommended,
  eslintPluginStylistic.configs.all,
  eslintPluginPrettierRecommended,
  {
    languageOptions: {
      globals: {
        ...globals.node,
      },
    },
    linterOptions: {
      reportUnusedDisableDirectives: "warn",
    },
    plugins: {
      "no-secrets": eslintPluginNoSecrets,
      perfectionist: eslintPluginPerfectionist,
    },
    rules: {
      "prettier/prettier": "warn",
    },
  },
  {
    files: ["**/*.html"],
    plugins: { html: eslintPluginHtml },
  },
  {
    files: ["**/*.js"],
    ...CONFIGS.js,
  },
  {
    files: ["**/*.json", "**/*.md/*.json"],
    plugins: {
      json: eslintJson,
    },
    ...eslintJson.configs.recommended,
    language: "json/json",
  },
  {
    files: ["package.json"],
    languageOptions: {
      parser: "jsonc-eslint-parser",
    },
    plugins: { depend: eslintPluginDepend },
    rules: {
      "depend/ban-dependencies": "error",
    },
  },
  {
    files: ["**/*.{md,mdx}"],
    ...eslintPluginMdx.flat,
    ...eslintPluginMdx.flatCodeBlocks,
    processor: eslintPluginMdx.createRemarkProcessor({
      lintCodeBlocks: true,
    }),
    rules: {
      "no-undef": "off",
      "no-unused-vars": "off",
      "prettier/prettier": ["warn", { parser: "markdown" }],
    },
  },
  ...eslintPluginToml.configs.recommended,
  {
    files: ["**/*.toml", "**/*.md/*.toml"],
    rules: {
      "prettier/prettier": ["error", { parser: "toml" }],
    },
  },
  ...eslintPluginYml.configs.standard,
  ...eslintPluginYml.configs.prettier,
  {
    files: ["**/*.yaml", "**/*.yml", "**/*.md/*.yaml", "**/*.md/*.yml"],
    rules: {
      "prettier/prettier": ["error", { parser: "yaml" }],
    },
  },
  {
    files: ["**/certbot.yaml", "**/compose*.yaml", "**/.*_treestamps.yaml"],
    rules: {
      "yml/no-empty-mapping-value": "off",
    },
  },
  eslintConfigPrettier, // Best if last
]);

================================================
FILE: cfg/frontend.mk
================================================
DEVENV_FRONTEND := 1
export DEVENV_FRONTEND

.PHONY: clean-frontend
## Clean frontend
## @category Clean
clean-frontend:
	cd frontend && make clean

.PHONY: clean
## Clean frontend too
## @category Clean
clean:: clean-frontend

.PHONY: install-frontend
## Install frontend
## @category Install
install-frontend:
	cd frontend && make install

.PHONY: install
## Install with all extras
## @category Install
install:: install-frontend

.PHONY: update-frontend
## Update deps for frontend
## @category Update
update-frontend:
	cd frontend && make update

.PHONY: update
## Update deps for frontend too
## @category Update
update:: update-frontend

.PHONY: fix-frontend
## Fix only frontend lint errors
## @category Lint
fix-frontend:
	bash -c "cd frontend && make fix"

.PHONY: fix
## Fix lint errors
## @category Lint
fix:: fix-frontend

.PHONY: lint-frontend
## Lint the frontend
## @category Lint
lint-frontend:
	bash -c "cd frontend && make lint"

.PHONY: lint
## Lint
## @category Lint
lint:: lint-frontend

.PHONY: dev-frontend-server
## Run the vite dev frontend
## @category Run
dev-frontend-server:
	bash -c "cd frontend && make dev-server"

.PHONY: test-frontend
## Run frontend tests
## @category Test
test-frontend::
	cd frontend && make test

.PHONY: test
## Run frontend tests too
## @category Test
test:: test-frontend

.PHONY: build-frontend
## Build frontend
## @category Build
build-frontend:
	cd frontend && make build

.PHONY: build
## Build with frontend
## @category Build
build:: build-frontend

================================================
FILE: cfg/help.mk
================================================
# Inspired from
# https://github.com/Mischback/django-calingen/blob/3f0e6db6/Makefile
# and https://gist.github.com/klmr/575726c7e05d8780505a

# fancy colors
"$$(tput setaf 6)" green := "$$(tput setaf 2)" red := "$$(tput setaf 1)" yel := "$$(tput setaf 3)" gray := "$$(tput setaf 8)" grayb := "$$(printf "\033[1m"; tput setaf 8)" end := "$$(tput sgr0)" TARGET_STYLED_HELP_NAME = "$(cyan)TARGET$(end)" ARGUMENTS_HELP_NAME = "$(green)ARGUMENT$(end)=$(red)VALUE$(end)" # This mountrous sed is compatible with both GNU sed and BSD sed (for macOS). # That's why "-E", "|", "+", "\s", "?", and "\t" aren't used. See the details # about BSD sed vs. GNU sed: https://riptutorial.com/sed/topic/9436 target_regex := [a-zA-Z0-9%_\/%-][a-zA-Z0-9%_\/%-]* variable_regex := [^:= ][^:= ]* variable_assignment_regex := [ ]*:*[+:!\?]*= * value_regex := .* category_annotation_regex := @category * category_regex := [^<][^<]* # We first parse and markup with these ad-hoc tags, and then we turn the markup # into a colorful output. target_tag_start := target_tag_end := target_variable_tag_start := target_variable_tag_end := variable_tag_start := variable_tag_end := global_variable_tag_start := global_variable_tag_end := value_tag_start := value_tag_end := prerequisites_tag_start := prerequisites_tag_end := doc_tag_start := doc_tag_indented_start := doc_tag_indented_end := doc_tag_end := category_tag_start := category_tag_end := default_category_tag_start := default_category_tag_end := DEFAULT_CATEGORY = General .DEFAULT_GOAL := help .PHONY: help help: @echo "Usage: make [$(TARGET_STYLED_HELP_NAME) [$(TARGET_STYLED_HELP_NAME) ...]] [$(ARGUMENTS_HELP_NAME) [$(ARGUMENTS_HELP_NAME) ...]]" @cat ${MAKEFILE_LIST} \ | tr '\t' ' ' \ | sed -n -e "/^## / { \ h; \ s/.*/##/; \ :doc" \ -e "H; \ n; \ s|^## *\(.*\)|$(doc_tag_start)$(doc_tag_indented_start)\1$(doc_tag_indented_end)$(doc_tag_end)|; \ s|^## *\(.*\)|$(doc_tag_start)\1$(doc_tag_end)|; \ t doc" \ -e "s| *#[^#].*||; " \ -e "s|^\(define *\)\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)|$(global_variable_tag_start)\2$(global_variable_tag_end)$(value_tag_start)\3$(value_tag_end)|;" \ -e "s|^\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)|$(global_variable_tag_start)\1$(global_variable_tag_end)$(value_tag_start)\2$(value_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\(\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)\)|$(target_variable_tag_start)\1$(target_variable_tag_end)$(variable_tag_start)\3$(variable_tag_end)$(value_tag_start)\4$(value_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\($(target_regex)\( *$(target_regex)\)*\) *\(\| *\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)$(prerequisites_tag_start)\2$(prerequisites_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\($(target_regex)\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)$(prerequisites_tag_start)\2$(prerequisites_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\(\| *\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *|$(target_tag_start)\1$(target_tag_end)|;" \ -e " \ G; \ s|## *\(.*\) *##|$(doc_tag_start)\1$(doc_tag_end)|; \ s|\\n||g;" \ -e "/$(category_annotation_regex)/!s|.*|$(default_category_tag_start)$(DEFAULT_CATEGORY)$(default_category_tag_end)&|" \ -e "s|^\(.*\)$(doc_tag_start)$(category_annotation_regex)\($(category_regex)\)$(doc_tag_end)|$(category_tag_start)\2$(category_tag_end)\1|" \ -e "p; \ }" \ | sort \ | sed -n \ -e "s|$(default_category_tag_start)|$(category_tag_start)|" \ -e "s|$(default_category_tag_end)|$(category_tag_end)|" \ -e "{G; s|\($(category_tag_start)$(category_regex)$(category_tag_end)\)\(.*\)\n\1|\2|; s|\n.*||; 
		-e "s|$(category_tag_start)||" \
		-e "s|$(category_tag_end)|:\n|" \
		-e "s|$(target_variable_tag_start)|$(target_tag_start)|" \
		-e "s|$(target_variable_tag_end)|$(target_tag_end)|" \
		-e "s|$(target_tag_start)| $(cyan)|" \
		-e "s|$(target_tag_end)|$(end) |" \
		-e "s|$(prerequisites_tag_start).*$(prerequisites_tag_end)||" \
		-e "s|$(variable_tag_start)|$(green)|g" \
		-e "s|$(variable_tag_end)|$(end)|" \
		-e "s|$(global_variable_tag_start)| $(green)|g" \
		-e "s|$(global_variable_tag_end)|$(end)|" \
		-e "s|$(value_tag_start)| (default: $(red)|" \
		-e "s|$(value_tag_end)|$(end))|" \
		-e "s|$(doc_tag_indented_start)|$(grayb)|g" \
		-e "s|$(doc_tag_indented_end)|$(end)|g" \
		-e "s|$(doc_tag_start)|\n |g" \
		-e "s|$(doc_tag_end)||g" \
		-e "p"

================================================
FILE: cfg/node.mk
================================================
DEVENV_NODE := 1
export DEVENV_NODE

.PHONY: install-deps-node
## Update and install node packages
## @category Install
install-deps-node:
	bun install

.PHONY: install
## Install
## @category Install
install:: install-deps-node

.PHONY: update-node
## Update node dependencies
## @category Update
update-node:
	./bin/update-deps-node.sh

.PHONY: update
## Update dependencies
## @category Update
update:: update-node

.PHONY: kill-eslint_d
## Kill eslint daemon
## @category Lint
kill-eslint_d:
	bin/kill-eslint_d.sh

## Show version. Use V variable to set version
## @category Update
V :=

.PHONY: version
## Show or set project version for node
## @category Update
version::
	bin/version-node.sh $(V)

================================================
FILE: cfg/node_root.mk
================================================
DEVENV_NODE_ROOT := 1
export DEVENV_NODE_ROOT

# Dummy target for mbake linting
.PHONY: all
all: ;

================================================
FILE: cfg/python.mk
================================================
DEVENV_PYTHON := 1
export DEVENV_PYTHON

.PHONY: clean
## Clean python caches
## @category Clean
clean::
	find . -name "__pycache__" -print0 | xargs -0 rm -rf
	rm -rf .coverage

.PHONY: install-deps-pip
## Upgrade pip itself
## @category Install
install-deps-pip:
	pip install --upgrade pip

.PHONY: install-prod
## Install for production
## @category Install
install-prod: install-deps-pip
	uv sync --no-install-project --no-dev

.PHONY: install
## Install with dev and all extras and groups
## @category Install
install:: install-deps-pip
	uv sync --no-install-project --all-extras --all-groups --all-packages

.PHONY: update-python
## Update python dependencies
## @category Update
update-python:
	./bin/update-deps-python.sh

.PHONY: update
## Update dependencies
## @category Update
update:: update-python

## Show version. Use V variable to set version
## @category Update
V :=

.PHONY: version
## Show or set project version for python
## @category Update
version::
	bin/version-python.sh $(V)

.PHONY: fix-python
## Fix python lint errors
## @category Fix
fix-python:
	./bin/fix-python.sh

.PHONY: fix
## Fix python lint errors
## @category Fix
fix:: fix-python

.PHONY: typecheck
## Static typecheck
## @category Lint
typecheck:
	uv run --group lint --group test --group build basedpyright .

.PHONY: ty
## Static typecheck with ty
## @category Lint
ty:
	uv run --group lint --group test --group build ty check .

.PHONY: complexity
## Lint backend complexity
## @category Lint
complexity:
	./bin/lint-complexity.sh

.PHONY: lint-python
## Lint python
## @category Lint
lint-python:
	./bin/lint-python.sh

.PHONY: lint
## Lint python
## @category Lint
lint:: lint-python

.PHONY: uml
## Create a UML class diagram
## @category Lint
uml:
	bin/uml.sh

.PHONY: cycle
## Detect Circular imports
## @category Lint
cycle:
	uvx pycycle --ignore node_modules,.venv --verbose --here

T :=

.PHONY: test-python
## Test Python
## @category Test
test-python:
	./bin/test-python.sh $(T)

.PHONY: test
## Run Python Tests. Use T variable to run specific tests
## @category Test
test:: test-python
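
# Usage sketch: T passes straight through to pytest, so a single test file can
# be run with e.g. `make test T=tests/test_import.py` (hypothetical path).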
ifndef OVERRIDE_BUILD
.PHONY: build
## Build package
## @category Build
build::
	uv build
endif

ifndef OVERRIDE_PUBLISH
.PHONY: publish
## Publish package to pypi
## @category Deploy
publish:
	uv publish
endif

================================================
FILE: ci/Dockerfile
================================================
ARG CODEX_BUILDER_BASE_VERSION
ARG CODEX_BASE_VERSION
FROM ajslater/codex-builder-base:${CODEX_BUILDER_BASE_VERSION} AS codex-built
ARG CODEX_WHEEL

WORKDIR /app
# Install codex
COPY ./dist/$CODEX_WHEEL ./dist/$CODEX_WHEEL
# hadolint ignore=DL3059,DL3013
RUN PYMUPDF_SETUP_PY_LIMITED_API=0 pip3 install --no-cache-dir ./dist/$CODEX_WHEEL

# Slim down /usr/local before it gets copied to the final image
# hadolint ignore=DL3059
RUN set -eux \
    # Remove pip, setuptools, wheel — not needed at runtime
    && pip3 uninstall -y pip setuptools wheel 2>/dev/null || true \
    && rm -rf /usr/local/bin/pip* \
    # Strip debug symbols from shared libraries (~30-50% size reduction on .so files)
    && find /usr/local -name '*.so' -exec strip --strip-unneeded {} + 2>/dev/null || true \
    && find /usr/local -name '*.so.*' -exec strip --strip-unneeded {} + 2>/dev/null || true \
    # Remove Python bytecode caches (regenerated on first import)
    && find /usr/local -type d -name '__pycache__' -exec rm -rf {} + 2>/dev/null || true \
    && find /usr/local -name '*.pyc' -delete 2>/dev/null || true \
    # Remove the stdlib test suite (~30MB) — safe, never needed at runtime
    && rm -rf /usr/local/lib/python*/test \
    && rm -rf /usr/local/lib/python*/idlelib \
    && rm -rf /usr/local/lib/python*/ensurepip \
    # Remove type stubs — only used by type checkers
    && find /usr/local -name '*.pyi' -delete 2>/dev/null || true \
    # Remove the installed wheel
    && rm -f /tmp/${CODEX_WHEEL}

FROM ajslater/codex-base:${CODEX_BASE_VERSION}
ARG CODEX_VERSION
LABEL org.opencontainers.image.title="Codex" \
    org.opencontainers.image.version="${CODEX_VERSION}" \
    org.opencontainers.image.authors="AJ Slater " \
    org.opencontainers.image.url="https://codex-reader.app" \
    org.opencontainers.image.source="https://github.com/ajslater/codex" \
    org.opencontainers.image.licenses="GPL-3.0-only" \
    org.opencontainers.image.deprecated="true" \
    org.opencontainers.image.description="This image has moved to ghcr.io/ajslater/codex"
ENV DOCKER_IMAGE_DEPRECATED="This docker image has moved to ghcr.io/ajslater/codex. This may be the last version on docker.io"

# Create the comics directory
RUN mkdir -p /comics && touch /comics/DOCKER_UNMOUNTED_VOLUME
# Fix Synology comicbox requiring config
RUN mkdir -p /home/abc/.config/comicbox \
    && chown -R abc /home/abc/.config \
    && chmod 777 /home/abc/.config /home/abc/.config/comicbox

# The final image is the minimal base with /usr/local copied.
# Possibly could optimize this further to only get python and bin
COPY --from=codex-built /usr/local /usr/local

VOLUME /comics
VOLUME /config
EXPOSE 9810
CMD ["/usr/local/bin/codex"]

================================================
FILE: ci/base.Dockerfile
================================================
FROM ajslater/python-debian:3.14.3-slim-trixie_2
ARG CODEX_BASE_VERSION
LABEL maintainer="AJ Slater "
LABEL version=$CODEX_BASE_VERSION

COPY ci/debian.sources /etc/apt/sources.list.d/
# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        curl \
        libimagequant0 \
        libjpeg62-turbo \
        libopenjp2-7 \
        libssl3 \
        libyaml-0-2 \
        libtiff6 \
        libwebp7 \
        ruamel.yaml.clib \
        unrar \
        zlib1g \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3013,DL3042
RUN pip3 install --no-cache --upgrade pip

================================================
FILE: ci/builder-base.Dockerfile
================================================
FROM nikolaik/python-nodejs:python3.14-nodejs24
# nodejs25 blocked on bug https://github.com/nodejs/node/issues/60303
ARG CODEX_BUILDER_BASE_VERSION
LABEL maintainer="AJ Slater "
LABEL version=${CODEX_BUILDER_BASE_VERSION}

# **** install codex system build dependency packages ****
COPY ci/debian.sources /etc/apt/sources.list.d/
# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        bash \
        build-essential \
        cmake \
        git \
        libimagequant0 \
        libjpeg62-turbo \
        libopenjp2-7 \
        libssl3 \
        libyaml-0-2 \
        libtiff6 \
        libwebp7 \
        python3-dev \
        ruamel.yaml.clib \
        unrar \
        zlib1g \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /app
# hadolint ignore=DL3013,DL3042
RUN pip3 install --no-cache --upgrade pip

================================================
FILE: ci/circleci-step-halt.sh
================================================
#!/bin/bash
# If the skip-steps flag file is present, skip this step.
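# (The SKIP_STEPS flag file is presumably created by an earlier CI step when
# the remaining work can be skipped, e.g. an identical dist already exists.)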
set -euo pipefail
if [ -f ./SKIP_STEPS ]; then
    circleci-agent step halt
fi

================================================
FILE: ci/cleanup-repo.py
================================================
#!/usr/bin/env python3
"""Remove old tags from a docker repo."""

import argparse
import json
import sys
import time
from datetime import datetime
from getpass import getpass
from urllib.request import Request, urlopen

API_BASE = "https://hub.docker.com/v2"
HTTP_OK = 200
HTTP_NO_CONTENT = 204
API_TIMEOUT = 10


def login(username, password):
    """Docker login."""
    url = f"{API_BASE}/users/login/"
    payload = {"username": username, "password": password}
    data = json.dumps(payload).encode("utf-8")
    # Send the JSON body with an explicit content type.
    headers = {"Content-Type": "application/json"}
    request = Request(url, data=data, headers=headers, method="POST")  # noqa: S310
    # urlopen raises HTTPError for non-2xx responses; the status check is a
    # belt-and-suspenders guard.
    with urlopen(request, timeout=API_TIMEOUT) as resp:  # noqa: S310
        body = resp.read()
        status = resp.status
    if status != HTTP_OK:
        print(f"Request {url} failed with status code {status}:")
        print(body.decode("utf-8", errors="replace"))
        reason = f"Login failed with HTTP {status}"
        raise RuntimeError(reason)
    return json.loads(body)["token"]


def fetch_all_tags(namespace, repo, token):
    """Get all the tags for a repo."""
    url = f"{API_BASE}/repositories/{namespace}/{repo}/tags?page_size=100"
    headers = {"Authorization": f"JWT {token}"}
    tags = []
    while url:
        request = Request(url, headers=headers)  # noqa: S310
        with urlopen(request, timeout=API_TIMEOUT) as response:  # noqa: S310
            data = json.loads(response.read())
        tags.extend(data["results"])
        url = data.get("next")
    return tags


def delete_tag(namespace, repo, tag, token, retries=3, delay=2):
    """Delete a tag."""
    headers = {"Authorization": f"JWT {token}"}
    url = f"{API_BASE}/repositories/{namespace}/{repo}/tags/{tag}/"
    for attempt in range(1, retries + 1):
        request = Request(url, headers=headers, method="DELETE")  # noqa: S310
        try:
            with urlopen(request, timeout=API_TIMEOUT) as resp:  # noqa: S310
                status = resp.status
            if status == HTTP_NO_CONTENT:
                return True
        except OSError as exc:
            # HTTPError is an OSError subclass with a .code attribute.
            status = getattr(exc, "code", exc)
        print(
            f"Attempt {attempt} failed to delete {tag} (HTTP {status})."
            f" Retrying in {delay}s..."
        )
        time.sleep(delay)
    return False


def read_password(args):
    """Read password/token securely from stdin or prompt."""
    if not sys.stdin.isatty():
        # user piped input
        return sys.stdin.read().strip()
    if args.password_prompt:
        # flag for interactive prompt
        return getpass("Docker Hub password or access token: ")
    return args.password


def get_args():
    """Get Args."""
    parser = argparse.ArgumentParser(
        description="Cleanup old Docker Hub tags",
        epilog="password is preferentially read from stdin",
    )
    parser.add_argument("username", help="Docker Hub username")
    parser.add_argument(
        "--password", help="Password or access token (not recommended for security)"
    )
    parser.add_argument(
        "--password-prompt",
        action="store_true",
        help="Read password securely from prompt",
    )
    parser.add_argument("namespace", help="Namespace or user of the repository")
    parser.add_argument("repository", help="Repository name")
    parser.add_argument(
        "--no-confirm",
        action="store_true",
        help="Do not confirm deletion with input prompt",
    )
    parser.add_argument(
        "--keep",
        type=int,
        default=10,
        help="Number of latest tags to keep (default 10)",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Do not actually delete, just show what would be deleted",
    )
    return parser.parse_args()


def _init():
    args = get_args()
    password = read_password(args)
    if not password:
        sys.exit(
            "❌ No password provided. Use --password, --password-prompt, or pipe it in."
        )
    return args, password
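
# Usage sketch, mirroring how the CI scripts invoke this (the token is piped
# on stdin, as in ci/docker-push.sh):
#   echo "$DOCKER_PASS" | ci/cleanup-repo.py --keep 0 --no-confirm "$DOCKER_USER" ajslater codex-arch
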
def _get_tags_to_delete(args, token):
    """Get deletable tags."""
    tags = fetch_all_tags(args.namespace, args.repository, token)
    if not tags:
        print("No tags found.")
        return None
    # Sort tags by last_updated descending
    tags.sort(
        key=lambda t: datetime.fromisoformat(t["last_updated"]),
        reverse=True,
    )
    to_delete = tags[args.keep :]
    if not to_delete:
        print(f"Nothing to delete (<= {args.keep} tags).")
        return None
    print(f"Keeping {args.keep} most recent tags:")
    for t in tags[: args.keep]:
        print(f"  {t['name']} ({t['last_updated']})")
    print(f"\nTags to delete ({len(to_delete)}):")
    for t in to_delete:
        print(f"  {t['name']} ({t['last_updated']})")
    return to_delete


def main():
    """Run the Program."""
    args, password = _init()
    token = login(args.username, password)
    print(f"Logged in as {args.username}")
    to_delete = _get_tags_to_delete(args, token)
    if not to_delete:
        return
    if args.dry_run:
        print("\nDry run mode. No tags will be deleted.")
        return
    if not args.no_confirm:
        confirm = input("\nProceed with deletion? (y/N) ").strip().lower()
        if confirm != "y":
            print("Aborted.")
            return
    for t in to_delete:
        success = delete_tag(args.namespace, args.repository, t["name"], token)
        print(f"{'✅ Deleted' if success else '⚠️ Failed'}: {t['name']}")


if __name__ == "__main__":
    main()

================================================
FILE: ci/debian.sources
================================================
Types: deb
URIs: http://deb.debian.org/debian
Suites: trixie trixie-updates
Components: main contrib non-free
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg

Types: deb
URIs: http://deb.debian.org/debian-security
Suites: trixie-security
Components: main contrib non-free
Signed-By: /usr/share/keyrings/debian-archive-keyring.gpg

================================================
FILE: ci/dev.Dockerfile
================================================
FROM ajslater/codex-builder-base:latest-aarch64
LABEL maintainer="AJ Slater "
LABEL version=dev

# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        htop \
        neovim \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059,DL3013
RUN pip3 install --upgrade --no-cache-dir \
    pip

WORKDIR /app
COPY pyproject.toml .
RUN uv sync --no-install-project --all-extras --upgrade

================================================
FILE: ci/dist-builder.Dockerfile
================================================
FROM oven/bun:latest AS bun-source
ARG CODEX_BUILDER_BASE_VERSION
FROM ajslater/codex-builder-base:${CODEX_BUILDER_BASE_VERSION}
ARG CODEX_DIST_BUILDER_VERSION
LABEL maintainer="AJ Slater "
LABEL version=${CODEX_DIST_BUILDER_VERSION}

# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        shellcheck \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY --from=bun-source /usr/local/bin/bun /usr/local/bin/bun
COPY --from=bun-source /usr/local/bin/bunx /usr/local/bin/bunx

WORKDIR /app
# **** install python app dependencies ****
# hadolint ignore=DL3022
COPY pyproject.toml uv.lock ./
RUN PIP_CACHE_DIR=$(pip3 cache dir) PYMUPDF_SETUP_PY_LIMITED_API=0 uv sync --no-install-project --no-dev --group lint --group test

# *** install node lint & test dependency packages ***
COPY package.json bun.lock ./
RUN bun install

# **** install npm app dependencies ****
WORKDIR /app/frontend
COPY frontend/package.json frontend/bun.lock ./
RUN bun install

WORKDIR /app
# **** copying source for dev build ****
COPY . .
VOLUME /app/codex/static_build
VOLUME /app/codex/static
VOLUME /app/dist
VOLUME /app/test-results
VOLUME /app/frontend/src/choices

================================================
FILE: ci/docker-bake.hcl
================================================
variable "ARCH" {}
variable "CODEX_ARCH_VERSION" {}
variable "CODEX_BASE_VERSION" {}
variable "CODEX_BUILDER_BASE_VERSION" {}
variable "CODEX_DIST_BUILDER_VERSION" {}
variable "CODEX_WHEEL" {}
variable "PKG_VERSION" {}

target "codex-base" {
  cache-from = [
    "type=registry,ref=docker.io/ajslater/codex-base:buildcache",
    "type=registry,ref=docker.io/ajslater/codex-base:latest-${ARCH}"
  ]
  cache-to = [
    "type=registry,ref=docker.io/ajslater/codex-base:buildcache,mode=max"
  ]
  dockerfile = "ci/base.Dockerfile"
  tags = [
    "docker.io/ajslater/codex-base:${CODEX_BASE_VERSION}",
    "docker.io/ajslater/codex-base:latest-${ARCH}"
  ]
  platforms = ["${ARCH}"]
  output = [
    "type=registry"
  ]
}

target "codex-builder-base" {
  inherits = ["codex-base"]
  cache-from = [
    "type=registry,ref=docker.io/ajslater/codex-builder-base:buildcache",
    "type=registry,ref=docker.io/ajslater/codex-builder-base:latest-${ARCH}"
  ]
  cache-to = [
    "type=registry,ref=docker.io/ajslater/codex-builder-base:buildcache,mode=max"
  ]
  dockerfile = "ci/builder-base.Dockerfile"
  tags = [
    "docker.io/ajslater/codex-builder-base:${CODEX_BUILDER_BASE_VERSION}",
    "docker.io/ajslater/codex-builder-base:latest-${ARCH}"
  ]
  output = [
    "type=registry"
  ]
}

target "codex-dist-builder" {
  inherits = ["codex-builder-base"]
  args = {
    CODEX_BUILDER_BASE_VERSION = CODEX_BUILDER_BASE_VERSION
  }
  cache-from = [
    "type=registry,ref=docker.io/ajslater/codex-dist-builder:buildcache",
    "type=registry,ref=docker.io/ajslater/codex-dist-builder:latest-${ARCH}"
  ]
  cache-to = [
    "type=registry,ref=docker.io/ajslater/codex-dist-builder:buildcache,mode=max"
  ]
  dockerfile = "ci/dist-builder.Dockerfile"
  tags = [
    "docker.io/ajslater/codex-dist-builder:${CODEX_DIST_BUILDER_VERSION}",
    "docker.io/ajslater/codex-dist-builder:latest-${ARCH}"
  ]
  output = [
    "type=docker",
    "type=registry"
  ]
}

target "codex-arch" {
  inherits = ["codex-builder-base"]
  args = {
    CODEX_BASE_VERSION = CODEX_BASE_VERSION
    CODEX_BUILDER_BASE_VERSION = CODEX_BUILDER_BASE_VERSION
    CODEX_WHEEL = CODEX_WHEEL
    PKG_VERSION = PKG_VERSION
  }
  cache-from = [
    "type=registry,ref=docker.io/ajslater/codex:latest"
  ]
  cache-to = []
  dockerfile = "ci/Dockerfile"
  tags = [
    "docker.io/ajslater/codex-arch:${CODEX_ARCH_VERSION}",
  ]
  annotations = [
    "manifest:org.opencontainers.image.title=Codex",
    "manifest:org.opencontainers.image.description=Codex Comic Server",
    "manifest:org.opencontainers.image.version=${PKG_VERSION}",
    "manifest:org.opencontainers.image.authors=AJ Slater ",
    "manifest:org.opencontainers.image.url=https://codex-reader.app",
    "manifest:org.opencontainers.image.source=https://github.com/ajslater/codex",
    "manifest:org.opencontainers.image.licenses=GPL-3.0-only",
    "manifest:org.opencontainers.image.deprecated=true",
    "manifest:org.opencontainers.image.description=This image has moved to ghcr.io/ajslater/codex"
  ]
  output = [
    "type=registry"
  ]
}

================================================
FILE: ci/docker-build-image.sh
================================================
#!/usr/bin/env bash
# Generic image builder script
set -xeuo pipefail
. ./ci/machine-env.sh

# Set env
# A leading -f forces the build even when the image already exists.
FORCE=""
if [ "${1-}" == "-f" ]; then
    FORCE=1
    shift
fi
TARGET=$1 # the docker bake target to build
REPO=docker.io/ajslater/$TARGET
VERSION_VAR=${TARGET^^}
VERSION_VAR=${VERSION_VAR//-/_}_VERSION
IMAGE="${REPO}:${!VERSION_VAR}"
if [ -z "$FORCE" ]; then
    # Skip build if image is already built. Optionally pull it.
    if docker manifest inspect "$IMAGE"; then
        echo "$IMAGE is already built."
        if [[ $* == *pull* ]]; then
            docker image pull "$IMAGE"
        fi
        exit 0
    fi
fi

# Build
ARCH=$(./ci/machine-arch.sh)
export ARCH
docker buildx bake \
    --builder codex-builder \
    --file ci/docker-bake.hcl \
    "$TARGET"

# Clean Repo
if [[ $* == *clean* ]]; then
    export UV_NO_DEV=1
    echo "$DOCKER_PASS" | uv run --only-group ci ./ci/cleanup-repo.py --no-confirm "$DOCKER_USER" ajslater "$TARGET" || true
fi

================================================
FILE: ci/docker-compose-exit.sh
================================================
#!/bin/bash
# Run a docker compose service and return its exit code
. ./ci/machine-env.sh
docker compose up --exit-code-from "$1" "$1"

================================================
FILE: ci/docker-init.sh
================================================
#!/bin/bash
# initialize docker builder with correct emulators for this arch
set -euo pipefail

# login to docker using environment variables
echo "$DOCKER_PASS" | docker login --username="$DOCKER_USER" --password-stdin

# install emulator binaries if needed
EMULATORS=
if [[ ${PLATFORMS-} == "linux/armhf" ]]; then
    # this is the only arch i need to cross compile on circleci
    EMULATORS=arm
fi
if [[ -n ${EMULATORS-} ]]; then
    docker run --rm --privileged tonistiigi/binfmt:latest --install "$EMULATORS"
fi

# buildx requires creating a builder on a fresh system
BUILDER_NAME=codex-builder
if ! docker buildx ls | grep -q "$BUILDER_NAME"; then
    echo "Builder '${BUILDER_NAME}' does not exist. Creating it now..."
    docker buildx create --name "$BUILDER_NAME" --use
    echo "Builder '${BUILDER_NAME}' created and selected."
else
    echo "Builder '${BUILDER_NAME}' already exists. Selecting it now..."
    docker buildx use "$BUILDER_NAME"
fi
# optional
# docker buildx inspect --bootstrap

================================================
FILE: ci/docker-push.sh
================================================
#!/bin/bash
# Load arch images and push all archs as one image to docker.io
set -euxo pipefail
. ./ci/machine-env.sh

IMAGE=docker.io/ajslater/codex
ARCH_IMAGE="ajslater/codex-arch"
ARCHES=(x86_64 aarch64)

if [[ $CODEX_VERSION =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
    # If the version is just numbers push it as latest
    CODEX_LATEST=1
fi

TAG_ARGS=(-t "$IMAGE:$CODEX_VERSION")
if [ "${CODEX_LATEST:-}" != "" ]; then
    TAG_ARGS+=(-t "$IMAGE":latest)
fi

IMAGE_ARGS=()
for arch in "${ARCHES[@]}"; do
    IMAGE_ARGS+=("$ARCH_IMAGE:${CODEX_VERSION}-${arch}")
done

docker buildx imagetools create \
    --annotation "index:org.opencontainers.image.title=Codex" \
    --annotation "index:org.opencontainers.image.description=Codex Comic Server" \
    --annotation "index:org.opencontainers.image.version=${CODEX_VERSION}" \
    --annotation "index:org.opencontainers.image.authors=AJ Slater " \
    --annotation "index:org.opencontainers.image.url=https://codex-reader.app" \
    --annotation "index:org.opencontainers.image.source=https://github.com/ajslater/codex" \
    --annotation "index:org.opencontainers.image.licenses=GPL-3.0-only" \
    --annotation "index:org.opencontainers.image.deprecated=true" \
    --annotation "index:org.opencontainers.image.description=This image has moved to ghcr.io/ajslater/codex" \
    "${TAG_ARGS[@]}" \
    "${IMAGE_ARGS[@]}"

docker buildx imagetools inspect "$IMAGE:$CODEX_VERSION"
if [ "${CODEX_LATEST:-}" != "" ]; then
    docker buildx imagetools inspect "$IMAGE:latest"
fi

export UV_NO_DEV=1
echo "$DOCKER_PASS" | uv run --only-group ci ./ci/cleanup-repo.py --keep 0 --no-confirm "$DOCKER_USER" ajslater codex-arch || true

================================================
FILE: ci/docker-tag-remote-version-as-latest.sh
================================================
#!/bin/bash
# Tag a remote version as latest
set -euo pipefail
REPO=docker.io/ajslater/codex
VERSION=$1
docker buildx imagetools create "$REPO:$VERSION" --tag "$REPO:latest"

================================================
FILE: ci/machine-arch.sh
================================================
#!/bin/bash
# get the target arch for the platform
if [[ ${PLATFORMS-} == "linux/armhf" ]]; then
    ARCH=aarch32
else
    ARCH=$(uname -m)
fi
echo "$ARCH"

================================================
FILE: ci/machine-env.sh
================================================
#!/bin/bash
# export env variables
export PATH=$PATH:"$HOME/.local/bin"
. ./ci/versions-env-filename.sh
set -a
# shellcheck disable=SC1090
. "$ENV_FN"
set +a

================================================
FILE: ci/machine-init.sh
================================================
#!/bin/bash
# Initialize environment for this machine.
set -euo pipefail
export PATH=$PATH:$HOME/.local/bin
./ci/circleci-step-halt.sh
./ci/machine-packages.sh
./ci/docker-init.sh
if [ $# -ne 0 ]; then
    ./ci/versions-create-env.sh "$@"
fi
. ./ci/machine-env.sh

================================================
FILE: ci/machine-packages.sh
================================================
#!/bin/bash
# install and upgrade system packages.
set -euo pipefail

# uv
if which uv; then
    echo "uv already installed."
else
    curl -LsSf https://astral.sh/uv/install.sh | sh
fi

pip3 install --upgrade pip

================================================
FILE: ci/package.Dockerfile
================================================
FROM ajslater/codex-base:latest-aarch64
ARG CODEX_VERSION
ENV CODEX_VERSION=${CODEX_VERSION}
LABEL maintainer="AJ Slater "
LABEL version=$CODEX_VERSION

# hadolint ignore=DL3008
RUN apt-get clean \
    && apt-get update \
    && apt-get install --no-install-recommends -y \
        htop \
        neovim \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

# hadolint ignore=DL3059,DL3013
RUN pip3 install --upgrade --no-cache-dir \
    pip

ENV WHEEL=codex-${CODEX_VERSION}-py3-none-any.whl
WORKDIR /
COPY dist/${WHEEL} .
RUN pip3 install --no-cache-dir ./${WHEEL}

VOLUME /comics
VOLUME /config
VOLUME /dist
EXPOSE 9810
CMD ["/usr/local/bin/codex"]

================================================
FILE: ci/python-publish.sh
================================================
#!/usr/bin/env bash
# Publish distribution to pypi
. ./ci/machine-env.sh
make publish

================================================
FILE: ci/version-checksum.sh
================================================
#!/bin/bash
# Create an arch-suffixed md5sum version from a list of parts.
set -euo pipefail
# This script must be sourced so the caller's arrays are passed in properly.
# Params:
#   EXTRA_MD5S  array of "md5 label" pairs
#   DEPS        array of file dependencies
DEPS_MD5=$(md5sum "${DEPS[@]}")
ALL_MD5S=("${EXTRA_MD5S[@]}" "${DEPS_MD5[@]}")
VERSION=$(
    echo "${ALL_MD5S[@]}" \
        | LC_ALL=C sort -k 2 \
        | md5sum \
        | awk '{print $1}'
)
ARCH=$(./ci/machine-arch.sh)
VERSION="${VERSION}-$ARCH"
echo "$VERSION"

================================================
FILE: ci/version-codex-base.sh
================================================
#!/bin/bash
# Compute the version tag for codex-base
set -euo pipefail
EXTRA_MD5S=("x x")
DEPS=(
    "$0"
    .dockerignore
    ci/base.Dockerfile
    ci/debian.sources
    ci/docker-bake.hcl
    ci/docker-build-image.sh
    ci/machine-arch.sh
    ci/version-checksum.sh
    ci/versions-create-env.sh
    ci/versions-env-filename.sh
)
. ./ci/version-checksum.sh

================================================
FILE: ci/version-codex-builder-base.sh
================================================
#!/bin/bash
# Compute the version tag for ajslater/codex-builder-base
set -euo pipefail
. ci/machine-env.sh
EXTRA_MD5S=("$CODEX_BASE_VERSION codex-base-version")
DEPS=(
    "$0"
    ci/builder-base.Dockerfile
)
. ./ci/version-checksum.sh

================================================
FILE: ci/version-codex-dist-builder.sh
================================================
#!/usr/bin/env bash
# Compute the version tag for ajslater/codex-dist-builder
set -euo pipefail
. ./ci/machine-env.sh
EXTRA_MD5S=("$CODEX_BUILDER_BASE_VERSION codex-builder-base-version")
# shellcheck disable=SC2046
readarray -d '' SOURCE_DEPS < <(find cfg codex frontend tests -type f \( \
    ! -path "*node_modules*" \
    ! -path "*codex/static_build*" \
    ! -path "*codex/static*" \
    ! -name "*~" \
    ! -name "*.pyc" \
    ! -name ".*cache" \
    ! -name ".DS_Store" \
    -print0 \
    \))
DEPS=(
    "$0"
    .prettierignore
    .shellcheckrc
    bin/build-dist.sh
    bin/collectstatic.sh
    bin/lint-python.sh
    bin/lint-complexity.sh
    bin/lint.sh
    bin/manage.py
    bin/pm
    bin/roman.py
    bin/test-python.sh
    bun.lock
    ci/dist-builder.Dockerfile
    eslint.config.js
    package.json
    pyproject.toml
    uv.lock
    Makefile
    "${SOURCE_DEPS[@]}"
)
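# Source (not exec) the shared helper below: it reads the EXTRA_MD5S and DEPS
# arrays from this shell and prints the combined arch-suffixed checksum version.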
. ./ci/version-checksum.sh

================================================
FILE: ci/versions-create-env.sh
================================================
#!/bin/bash
# Create the docker .env for this architecture
set -euo pipefail
. ./ci/versions-env-filename.sh
if [[ $* == *base* ]]; then
    CODEX_VERSION=$(uv version --short)
    ARCH=$(./ci/machine-arch.sh)
    (
        echo "CODEX_BASE_VERSION=$(./ci/version-codex-base.sh)"
        echo "CODEX_BUILDER_BASE_VERSION=$(./ci/version-codex-builder-base.sh)"
        echo "CODEX_VERSION=${CODEX_VERSION}"
        echo "CODEX_ARCH_VERSION=${CODEX_VERSION}-${ARCH}"
        echo "CODEX_WHEEL=codex-${CODEX_VERSION}-py3-none-any.whl"
    ) > "$ENV_FN"
fi
if [[ $* == *dist-builder* ]]; then
    echo "CODEX_DIST_BUILDER_VERSION=$(./ci/version-codex-dist-builder.sh)" >> "$ENV_FN"
fi

================================================
FILE: ci/versions-env-filename.sh
================================================
#!/bin/bash
# Set the env filename var
ARCH=$(./ci/machine-arch.sh)
export ENV_FN=./.env-${ARCH}

================================================
FILE: codex/__init__.py
================================================
"""Initialize Django."""

from os import environ

from django import setup

from codex.signals.django_signals import connect_signals

if environ.get("PYTHONDEVMODE"):
    from icecream.builtins import install

    install()

# This all happens before anything else to make django safe to use.
environ.setdefault("DJANGO_SETTINGS_MODULE", "codex.settings")
setup()
connect_signals()

================================================
FILE: codex/applications/__init__.py
================================================
"""ASGI Applications."""

================================================
FILE: codex/applications/lifespan.py
================================================
"""Start and stop daemons."""

import asyncio
from contextlib import suppress

from loguru import logger

from codex.signals.os_signals import bind_signals_to_loop
from codex.startup.loguru import loguru_init
from codex.websockets.listener import BroadcastListener


class LifespanApplication:
    """Lifespan ASGI App."""

    SCOPE_TYPE = "lifespan"

    def __init__(self, broadcast_queue) -> None:
        """Create logger and librarian."""
        loguru_init()
        self.broadcast_queue = broadcast_queue
        self.broadcast_listener = BroadcastListener(logger, broadcast_queue)
        self.broadcast_listener_task = None

    async def _event(self, event, send) -> None:
        """Process a lifespan event."""
        try:
            logger.debug(f"Lifespan {event} started.")
            func = getattr(self, "_" + event)
            await func()
            await send({"type": f"lifespan.{event}.complete"})
            logger.debug(f"Lifespan {event} complete.")
        except Exception:
            await send({"type": f"lifespan.{event}.failed"})
            logger.exception(f"Lifespan {event} failed.")
            raise

    async def _startup(self) -> None:
        """Startup tasks."""
        bind_signals_to_loop()
        self.broadcast_listener_task = asyncio.create_task(
            self.broadcast_listener.listen()
        )

    async def _shutdown(self) -> None:
        """Shutdown tasks."""
        with suppress(ValueError):
            # Depending on timing this can be closed already
            self.broadcast_queue.put(None)
        if self.broadcast_listener_task:
            await self.broadcast_listener_task

    async def __call__(self, scope, receive, send) -> None:
        """Lifespan application."""
        if scope["type"] != self.SCOPE_TYPE:
            return
        logger.debug("Lifespan application started.")
        while True:
            try:
                message = await receive()
                if message["type"] == "lifespan.startup":
                    await self._event("startup", send)
                elif message["type"] == "lifespan.shutdown":
                    await self._event("shutdown", send)
                    break
            except Exception:
                logger.exception("Lifespan application")
        logger.debug("Lifespan application stopped.")
logger.exception("Lifespan application") logger.debug("Lifespan application stopped.") ================================================ FILE: codex/applications/websocket.py ================================================ """Channels Websocket Application.""" from channels.auth import AuthMiddlewareStack from channels.routing import URLRouter from channels.security.websocket import AllowedHostsOriginValidator from django.urls import path from codex.websockets.consumers import NotifierConsumer WEBSOCKET_APPLICATION = AllowedHostsOriginValidator( AuthMiddlewareStack( URLRouter( [ path( # pyright:ignore[reportCallIssue] "api/v3/ws", NotifierConsumer.as_asgi(), # pyright: ignore[reportArgumentType] name="websocket", ), ] ) ) ) ================================================ FILE: codex/asgi.py ================================================ """ ASGI config for codex project. It exposes the ASGI callable as a module-level variable named ``DJANGO_APPLICATION``. For more information on this file, see https://docs.djangoproject.com/en/dev/howto/deployment/asgi/ """ from channels.routing import ProtocolTypeRouter from django.core.asgi import get_asgi_application from codex.applications.lifespan import LifespanApplication from codex.applications.websocket import WEBSOCKET_APPLICATION from codex.websockets.mp_queue import BROADCAST_QUEUE application = ProtocolTypeRouter( { "http": get_asgi_application(), "websocket": WEBSOCKET_APPLICATION, "lifespan": LifespanApplication(BROADCAST_QUEUE), } ) ================================================ FILE: codex/authentication.py ================================================ """Custom Authentication classes.""" from django.contrib.auth.middleware import RemoteUserMiddleware from rest_framework.authentication import TokenAuthentication class BearerTokenAuthentication(TokenAuthentication): """Bearer Token Authentication.""" keyword = "Bearer" class HttpRemoteUserMiddleware(RemoteUserMiddleware): """ Http Remote User Backend. Regular REMOTE_USER can only be set by on machine wgsi socket magic. 
""" header = "HTTP_REMOTE_USER" ================================================ FILE: codex/choices/__init__.py ================================================ """Enums and Choices for models and Seralizers.""" ================================================ FILE: codex/choices/admin.py ================================================ """Admin Choices.""" from types import MappingProxyType from django.db.models.enums import TextChoices class AdminFlagChoices(TextChoices): """Choices for Admin Flags.""" AUTO_UPDATE = "AU" BANNER_TEXT = "BT" FOLDER_VIEW = "FV" IMPORT_METADATA = "IM" LAZY_IMPORT_METADATA = "LI" NON_USERS = "NU" REGISTRATION = "RG" SEND_TELEMETRY = "ST" ADMIN_FLAG_CHOICES = MappingProxyType( { AdminFlagChoices.AUTO_UPDATE.value: "Auto Update", AdminFlagChoices.BANNER_TEXT.value: "Banner Text", AdminFlagChoices.FOLDER_VIEW.value: "Folder View", AdminFlagChoices.IMPORT_METADATA.value: "Import Metadata on Library Scan", AdminFlagChoices.LAZY_IMPORT_METADATA.value: "Import Metadata on Demand", AdminFlagChoices.NON_USERS.value: "Non Users", AdminFlagChoices.REGISTRATION.value: "Registration", AdminFlagChoices.SEND_TELEMETRY.value: "Send Stats", } ) ================================================ FILE: codex/choices/browser.py ================================================ """Browser Choices.""" from types import MappingProxyType from comicbox.enums.maps.identifiers import ID_SOURCE_NAME_MAP from codex.views.browser.const import BROWSER_FILTER_KEYS BROWSER_BOOKMARK_FILTER_CHOICES = MappingProxyType( { "": "All", "IN_PROGRESS": "In Progress", "READ": "Read", "UNREAD": "Unread", }, ) BROWSER_ORDER_BY_CHOICES = MappingProxyType( { "created_at": "Added Time", "age_rating": "Age Rating", "child_count": "Child Count", "critical_rating": "Critical Rating", "filename": "Filename", "size": "File Size", "bookmark_updated_at": "Last Read", "sort_name": "Name", "page_count": "Page Count", "date": "Publish Date", "search_score": "Search Score", "story_arc_number": "Story Arc Number", "updated_at": "Updated Time", } ) _GROUP_NAMES = MappingProxyType( { "p": "Publishers", "i": "Imprints", "s": "Series", "v": "Volumes", } ) BROWSER_TOP_GROUP_CHOICES = MappingProxyType( { **_GROUP_NAMES, "c": "Issues", "f": "Folders", "a": "Story Arcs", }, ) BROWSER_ROUTE_CHOICES = MappingProxyType({**BROWSER_TOP_GROUP_CHOICES, "r": "Root"}) VUETIFY_NULL_CODE = -1 _IDENTIFIER_SOURCES = MappingProxyType( {key.value: value for key, value in ID_SOURCE_NAME_MAP.items()} ) BROWSER_CHOICES = MappingProxyType( { "BOOKMARK_FILTER": BROWSER_BOOKMARK_FILTER_CHOICES, "ORDER_BY": BROWSER_ORDER_BY_CHOICES, "TOP_GROUP": BROWSER_TOP_GROUP_CHOICES, "VUETIFY_NULL_CODE": VUETIFY_NULL_CODE, "SETTINGS_GROUP": {**_GROUP_NAMES}, "IDENTIFIER_SOURCES": _IDENTIFIER_SOURCES, } ) DEFAULT_BROWSER_ROUTE = MappingProxyType({"group": "r", "pks": (0,), "page": 1}) _DEFAULT_SHOW = MappingProxyType({"i": False, "p": True, "s": True, "v": False}) _DEFAULT_FILTERS = MappingProxyType( { "bookmark": "", **dict.fromkeys(BROWSER_FILTER_KEYS, ()), } ) BROWSER_DEFAULTS = MappingProxyType( { "custom_covers": True, "dynamic_covers": True, "filters": _DEFAULT_FILTERS, "order_by": "sort_name", "order_reverse": False, "search": "", "show": _DEFAULT_SHOW, "top_group": "p", "twenty_four_hour_time": False, "always_show_filename": False, "last_route": DEFAULT_BROWSER_ROUTE, } ) DUMMY_NULL_NAME = "_none_" ================================================ FILE: codex/choices/choices_to_json.py ================================================ #!/usr/bin/env 
#!/usr/bin/env python3
"""Dump choices to JSON."""

import json
import sys
from collections.abc import Mapping
from pathlib import Path
from types import MappingProxyType

from caseconverter import camelcase

from codex.choices.admin import ADMIN_FLAG_CHOICES
from codex.choices.browser import BROWSER_CHOICES, BROWSER_DEFAULTS
from codex.choices.jobs import ADMIN_JOBS
from codex.choices.notifications import Notifications
from codex.choices.reader import READER_CHOICES, READER_DEFAULTS
from codex.choices.search import SEARCH_FIELDS
from codex.choices.statii import ADMIN_STATUS_TITLES

_DEFAULTS = MappingProxyType(
    {"browser-choices.json": BROWSER_DEFAULTS, "reader-choices.json": READER_DEFAULTS}
)
_DUMPS = MappingProxyType(
    {
        "admin-flag-choices.json": ADMIN_FLAG_CHOICES,
        "admin-status-titles.json": ADMIN_STATUS_TITLES,
        "browser-choices.json": BROWSER_CHOICES,
        "reader-choices.json": READER_CHOICES,
    }
)
_MAP_DUMPS = MappingProxyType(
    {
        "admin-jobs.json": ADMIN_JOBS,
        "browser-defaults.json": BROWSER_DEFAULTS,
        "browser-map.json": BROWSER_CHOICES,
        "reader-defaults.json": READER_DEFAULTS,
        "reader-map.json": READER_CHOICES,
        "search-map.json": SEARCH_FIELDS,
    }
)


def _to_vuetify_choices(defaults, key: str, obj_map) -> list:
    """Transform a dict into a list of vuetify choices."""
    default = defaults.get(key)
    vuetify_list = []
    for value, title in obj_map.items():
        vuetify_dict = {
            "value": value,
            "title": title,
        }
        if default == value:
            vuetify_dict["default"] = True
        vuetify_list.append(vuetify_dict)
    return vuetify_list


def _json_key(key: str):
    """Transform key to json version."""
    return key if key.upper() == key else camelcase(key)
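
# e.g. _json_key("top_group") returns "topGroup", while an all-caps key like
# "VUETIFY_NULL_CODE" passes through unchanged.
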
parent_path = Path(parent_path) parent_path.mkdir(exist_ok=True) for fn, data in _DUMPS.items(): _dump(parent_path, fn, data, vuetify=True, jsonize_keys=True) for fn, data in _MAP_DUMPS.items(): jsonize_keys = fn != "search-map.json" _dump(parent_path, fn, data, vuetify=False, jsonize_keys=jsonize_keys) ws_messages = _make_websocket_messages() _dump( parent_path, "websocket-messages.json", ws_messages, vuetify=False, jsonize_keys=True, ) if __name__ == "__main__": main() ================================================ FILE: codex/choices/jobs.py ================================================ """Admin Jobs: task-to-status mapping for the combined Jobs tab.""" from types import MappingProxyType # All importer sub-status codes (triggered by poll/import tasks). _IMPORTER_STATUSES = ( "IRT", "IAT", "IQT", "IQC", "IQL", "IQV", "ICT", "IUT", "ICC", "IUC", "ICV", "IUV", "ILT", "ILV", "IMF", "IMC", "IMV", "IRF", "IRC", "IRV", "ISU", "ISC", "IFQ", "IFU", "IFC", "IFD", ) _POLL_STATUSES = ("WPO", *_IMPORTER_STATUSES, "IGU") _SEARCH_SYNC_STATUSES = ("SSU", "SSC") _JANITOR_NIGHTLY_STATUSES = ( "JLV", "JAF", "IMF", "JIF", "JID", "JIS", "JCT", "JRV", "JRS", "JRB", "SIR", "SSU", "SSC", "SIO", "JDO", "JDB", "CFO", "CRC", ) # Maps task value to a dict of metadata for the Jobs tab. # "statuses": tuple of LibrarianStatus.status_type codes this task drives. # "abort": optional task value that aborts this job. # title and value are Vuetify format for easier loading in the frontend. ADMIN_JOBS: MappingProxyType[str, tuple[dict, ...]] = MappingProxyType( { "ADMIN_JOBS": ( { "title": "Libraries", "jobs": ( { "value": "poll", "title": "Update Libraries", "statuses": _POLL_STATUSES, "abort": "import_abort", "variants": ( { "value": "poll", "title": "Poll", "desc": ("Update Libraries if changes detected"), }, { "value": "poll_force", "title": "Force Update", "desc": ("Forcibly update all comics in all libraries"), "confirm": "This can take a long time", }, { "value": "force_update_all_failed_imports", "title": "Failed Imports", "desc": ( "Forcibly update all failed imports in" " all libraries" ), "confirm": "This can take a long time", }, ), }, { "value": "watcher_restart", "title": "Update Watcher with DB", "desc": ( "Ensure the file Watcher is updated per database" " preferences for each library" ), "statuses": ("WRS",), }, ), }, { "title": "Covers", "jobs": ( { "value": "purge_comic_covers", "title": "Remove Comic Covers", "desc": "from every library", "confirm": "Are you sure?", "statuses": ("CRC",), }, { "value": "create_all_comic_covers", "title": "Create All Comic Covers", "desc": ( "Pre-generate covers for every comic in every" " library and all custom covers" ), "confirm": "Are you sure?", "statuses": ("CCC",), }, { "value": "force_update_groups", "title": "Update Group Timestamps", "desc": ( "Force the update of group timestamps. Will bust" " the browser cache for browser views and covers." ), "statuses": ("IGU",), }, ), }, { "title": "Search Index", "jobs": ( { "value": "search_index_update", "title": "Sync Search Index", "statuses": ("SIX", *_SEARCH_SYNC_STATUSES), "abort": "search_index_abort", "variants": ( { "value": "search_index_update", "title": "Sync Search Index", "desc": "with recently changed comics.", "confirm": "This can take a long time", }, { "value": "search_index_rebuild", "title": "Rebuild Search Index Using Sync", "desc": ( "Delete and rebuild the search index" " from scratch using the syncer." 
), "confirm": "This can take a long time", }, ), }, { "value": "search_index_optimize", "title": "Optimize Search Index", "desc": ( "Merge Search Index for optimal lookup time. Runs nightly." ), "statuses": ("SIO",), }, { "value": "search_index_remove_stale", "title": "Clean Stale Index Entries", "desc": ( "Clean search index entries that are no longer in" " the library." ), "statuses": ("SIR",), }, { "value": "search_index_clear", "title": "Clear Search Index", "desc": "of all entries", "confirm": ( "Are you sure? Resyncing the search index can take a while." ), "statuses": ("SIX",), }, { "value": "db_fts_integrity_check", "title": "Integrity Check Search Index", "desc": ("Repairs Search Index on failure. Runs nightly"), "statuses": ("JIS",), }, { "value": "db_fts_rebuild", "title": "Repair Search Index", "desc": ( "Probably faster than Rebuild if the integrity check fails." ), "statuses": ("JSR",), }, ), }, { "title": "Database", "jobs": ( { "value": "db_vacuum", "title": "Optimize & Compact Database", "desc": ( "Run the sqlite3 OPTIMIZE and VACUUM pragmas. Runs nightly" ), "statuses": ("JDO",), }, { "value": "db_backup", "title": "Backup Database", "desc": "Runs nightly", "statuses": ("JDB",), }, { "value": "db_foreign_key_check", "title": "Remove Illegal Foreign Keys", "desc": ( "Check for and remove illegal foreign keys. Mark" " affected comics for update. Runs nightly." ), "statuses": ("JIF",), }, { "value": "db_integrity_check", "title": "Check Database Integrity", "desc": "Check logs for results. Runs nightly.", "confirm": ( "Can take a while on large databases, Are you sure?" ), "statuses": ("JID",), }, ), }, { "title": "Codex Software", "jobs": ( { "value": "codex_latest_version", "title": "Check for Codex Latest Version", "desc": "Check PyPi for the latest version of Codex", "statuses": ("JLV",), }, { "value": "codex_update", "title": "Update Codex", "desc": ("If Codex updates to a new version, it will restart"), "confirm": "Are you sure?", "statuses": ("JCU",), }, { "value": "codex_restart", "title": "Restart Codex Server", "desc": "Immediately", "confirm": "Are you sure?", "statuses": ("RCR",), }, { "value": "codex_shutdown", "title": "Shutdown Codex Server", "desc": "Immediately", "confirm": "Are you sure?", "statuses": ("RCS",), }, ), }, { "title": "Cleanup", "abort": "cleanup_abort", "jobs": ( { "value": "janitor_nightly", "title": "Run Nightly Maintenance", "desc": ( "Runs several cleanup, index, and database tasks" " that also run nightly." ), "confirm": ("Launches several tasks that run nightly anyway."), "statuses": _JANITOR_NIGHTLY_STATUSES, }, { "value": "cleanup_fks", "title": "Remove Orphan Tags", "desc": ( "After deleting comics, unused linked objects" " remain in case new comics use them." " Runs nightly." ), "statuses": ("JCT",), }, { "value": "cleanup_db_custom_covers", "title": "Remove Orphan Database Custom Covers", "desc": ( "Remove Custom Covers from the db that no longer" " represent custom cover images on disk." " Runs nightly." ), "statuses": ("JRV",), }, { "value": "cleanup_sessions", "title": "Cleanup Sessions", "desc": ("Remove corrupt and expired sessions. Runs nightly."), "statuses": ("JRS",), }, { "value": "cleanup_covers", "title": "Remove Orphan Cover Thumbnails", "desc": ( "no longer have source comics or custom images." " Runs nightly." ), "statuses": ("CFO", "CRC"), }, { "value": "cleanup_bookmarks", "title": "Remove Orphan Bookmarks", "desc": ("Owned by no session or user. 
Runs nightly."), "statuses": ("JRB",), }, { "value": "adopt_folders", "title": "Adopt Orphan Folders", "desc": ( "Move orphaned folders from the top of the folder" " tree to under their correct parent. Runs" " nightly and at startup." ), "statuses": ("JAF",), }, { "value": "librarian_clear_status", "title": "Clear Librarian Statuses", "desc": "Mark all Librarian tasks finished.", "statuses": (), }, ), }, { "title": "Notify", "jobs": ( { "value": "notify_admin_flags_changed", "title": "Admin Flags Changed", "desc": ("Notify all users that admin flags have changed."), "statuses": (), }, { "value": "notify_bookmark_changed", "title": "Bookmark Changed", "desc": ("Notify only your user that a bookmark changed."), "statuses": (), }, { "value": "notify_covers_changed", "title": "Covers Changed", "desc": ("Notify all users that covers have changed."), "statuses": (), }, { "value": "notify_failed_imports_changed", "title": "Failed Imports Changed", "desc": ("Notify admin users that failed imports have changed"), "statuses": (), }, { "value": "notify_groups_changed", "title": "Groups Changed", "desc": ("Notify all users that ACL groups have changed."), "statuses": (), }, { "value": "notify_library_changed", "title": "Library Changed", "desc": ("Notify all users libraries have changed."), "statuses": (), }, { "value": "notify_librarian_status", "title": "Librarian Status", "desc": ( "Notify admin users that a librarian job status changed." ), "statuses": (), }, { "value": "notify_users_changed", "title": "Users Changed", "desc": ( "Notify one user that their users changed or all" " users if a user was deleted." ), "statuses": (), }, ), }, ), } ) ================================================ FILE: codex/choices/notifications.py ================================================ """Notification messages.""" from enum import Enum class Notifications(Enum): """Websocket Notifications.""" ADMIN_FLAGS = "ADMIN_FLAGS_CHANGED" BOOKMARK = "BOOKMARK_CHANGED" COVERS = "COVERS_CHANGED" FAILED_IMPORTS = "FAILED_IMPORTS" GROUPS = "GROUPS_CHANGED" LIBRARY = "LIBRARY_CHANGED" LIBRARIAN_STATUS = "LIBRARIAN_STATUS" USERS = "USERS_CHANGED" ================================================ FILE: codex/choices/reader.py ================================================ """Frontend Choices, Defaults and Messages.""" from types import MappingProxyType READER_CHOICES = MappingProxyType( { "FIT_TO": MappingProxyType( { "S": "Fit to Screen", "W": "Fit to Width", "H": "Fit to Height", "O": "Original Size", } ), "READING_DIRECTION": MappingProxyType( { "ltr": "Left to Right", "rtl": "Right to Left", "ttb": "Top to Bottom", "btt": "Bottom to Top", } ), } ) READER_DEFAULTS = MappingProxyType( { "finish_on_last_page": True, "fit_to": "W", "reading_direction": "ltr", "read_rtl_in_reverse": False, "two_pages": False, "page_transition": True, "cache_book": True, } ) ================================================ FILE: codex/choices/search.py ================================================ """Create the search field alias help map.""" from types import MappingProxyType _REVERSE_TYPE_MAP = MappingProxyType( { "Boolean": ["monochrome"], "Date": ["date"], "DateTime": ["create_at", "updated_at"], "Decimal": ["critical_rating", "issue_number"], "Integer": [ "day", "month", "year", "page_count", "size", "decade", "volume", "volume_to", ], } ) _TYPE_MAP = MappingProxyType( { field: field_type for field_type, fields in _REVERSE_TYPE_MAP.items() for field in fields } ) def gen_multipart_field_aliases(field) -> frozenset: """Generate aliases 
for fields made of snake_case words.""" bits = field.split("_") aliases = set({field}) # Singular from plural if field.endswith("s"): singular = field[:-1] if not singular.endswith("ie"): aliases.add(singular) # Alternate delimiters for connector in ("", "-"): joined = connector.join(bits) aliases.add(joined) return frozenset(aliases) def _get_fieldmap_values(*args) -> tuple: values = set() for val in args: values |= gen_multipart_field_aliases(val) return tuple(sorted(values)) FIELDMAP = MappingProxyType( { "age_rating": ("age",), "characters": _get_fieldmap_values("category", "categories", "characters"), "collection_title": ("collection",), "country": (), "credits": _get_fieldmap_values( "authors", "contributors", "creators", "credits", "people", "persons", ), "created_at": ("created",), "critical_rating": _get_fieldmap_values("critical_rating"), "day": (), "date": (), "decade": (), "file_type": ( "filetype", "type", ), "genres": ("genre",), "identifiers": _get_fieldmap_values("id", "id_key", "identifier"), "imprint": (), "issue": (), "issue_number": ("number",), "issue_suffix": (), "locations": ("location", "loc"), "language": ("lang"), "main_character": _get_fieldmap_values("main_character"), "main_team": _get_fieldmap_values("main_team"), "month": (), "monochrome": _get_fieldmap_values("black_and_white"), "name": ("title",), "notes": (), "original_format": ("format",), "protagonist": ("protag", "lead"), "publisher": (), "page_count": ("pages",), "path": _get_fieldmap_values( "filename", "folders", ), "reading_direction": ("direction", "rd"), "review": (), "scan_info": ("scan",), "series": (), "series_groups": _get_fieldmap_values("series_groups"), "size": (), "sources": _get_fieldmap_values("sources", "id_sources"), "stories": ("story",), "story_arcs": _get_fieldmap_values("story_arcs", "arcs", "arc"), "summary": _get_fieldmap_values("desc", "description", "comments"), "tags": ("tag",), "tagger": (), "teams": ("team",), "updated_at": ("updated",), "universes": ("universe", "designation"), "volume": ("volume_from",), "volume_to": (), "year": (), } ) def create_search_field_map() -> dict: """Create the search field alias help map.""" result = {} for key, values in FIELDMAP.items(): result[key] = { "type": _TYPE_MAP.get(key, "String"), "aliases": tuple(sorted({*values} - {key})), } return result SEARCH_FIELDS = create_search_field_map() ================================================ FILE: codex/choices/statii.py ================================================ """Status code to title map.""" from itertools import chain from bidict import frozenbidict from codex.librarian.covers.status import COVERS_STATII from codex.librarian.fs.poller.status import FS_STATII from codex.librarian.fs.watcher.status import WATCHER_STATII from codex.librarian.restarter.status import RESTARTER_STATII from codex.librarian.scribe.importer.statii.create import CREATE_STATII from codex.librarian.scribe.importer.statii.delete import REMOVE_STATII from codex.librarian.scribe.importer.statii.failed import FAILED_IMPORTS_STATII from codex.librarian.scribe.importer.statii.link import LINK_STATII from codex.librarian.scribe.importer.statii.query import QUERY_STATII from codex.librarian.scribe.importer.statii.read import READ_STATII from codex.librarian.scribe.importer.statii.search import IMPORTER_SEARCH_INDEX_STATII from codex.librarian.scribe.janitor.status import JANITOR_STATII from codex.librarian.scribe.search.status import SEARCH_INDEX_STATII from codex.librarian.scribe.status import SCRIBE_STATII _STATII = ( 
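# Order here is cosmetic; ADMIN_STATUS_TITLES below sorts by status CODE.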
RESTARTER_STATII, COVERS_STATII, FS_STATII, WATCHER_STATII, JANITOR_STATII, SEARCH_INDEX_STATII, SCRIBE_STATII, CREATE_STATII, REMOVE_STATII, LINK_STATII, QUERY_STATII, READ_STATII, IMPORTER_SEARCH_INDEX_STATII, FAILED_IMPORTS_STATII, ) ADMIN_STATUS_TITLES = frozenbidict( sorted((status.CODE, status.title()) for status in chain.from_iterable(_STATII)) ) ================================================ FILE: codex/librarian/README.md ================================================ # librarian Most non-UI tasks are run by the background librariand process, which spawns worker threads of its own. ================================================ FILE: codex/librarian/__init__.py ================================================ """The librarian is a collection of daemons that run background tasks.""" ================================================ FILE: codex/librarian/bookmark/__init__.py ================================================ """Bookmark Thread.""" ================================================ FILE: codex/librarian/bookmark/bookmarkd.py ================================================ """Update bookmarks and user activity in batches, reading tasks from a queue.""" from collections.abc import Mapping from dataclasses import dataclass from typing import override from loguru import logger from codex.librarian.bookmark.latest_version import CodexLatestVersionUpdater from codex.librarian.bookmark.tasks import ( BookmarkUpdateTask, ClearLibrarianStatusTask, CodexLatestVersionTask, UserActiveTask, ) from codex.librarian.bookmark.update import BookmarkUpdateMixin from codex.librarian.bookmark.user_active import UserActiveMixin from codex.librarian.telemeter.tasks import TelemeterTask from codex.librarian.telemeter.telemeter import send_telemetry from codex.librarian.threads import AggregateMessageQueuedThread @dataclass class BookmarkKey: """Bookmark queue item key.""" auth_filter: Mapping[str, int | str | None] | None = None comic_pks: tuple = () user_pk: int = 0 @override def __hash__(self) -> int: """Hash the dict as a tuple.""" auth_filters = ( None if self.auth_filter is None else tuple(self.auth_filter.items()) ) return hash((auth_filters, self.comic_pks, self.user_pk)) @override def __eq__(self, other) -> bool: """Equal uses hashes.""" return self.__hash__() == other.__hash__() class BookmarkThread( AggregateMessageQueuedThread, BookmarkUpdateMixin, UserActiveMixin, ): """Aggregate Bookmark updates to prevent floods and update the db in batches.""" FLOOD_DELAY = 3.0 MAX_DELAY = 5.0 def __init__(self, *args, **kwargs) -> None: """Init mixins.""" super().__init__(*args, **kwargs) self.init_group_acl() self.init_user_active() def _process_task_immediately(self, task) -> None: if self.db_write_lock.locked(): self.log.warning(f"Database locked, not processing {task}") return match task: case TelemeterTask(): send_telemetry(self.log) case ClearLibrarianStatusTask(): self.status_controller.finish_many([]) case CodexLatestVersionTask(): worker = CodexLatestVersionUpdater( self.log, self.librarian_queue, self.db_write_lock ) worker.update_latest_version(force=task.force) case _: self.log.warning(f"Unknown Bookmark task {task}") @override def aggregate_items(self, item) -> None: """Aggregate bookmark updates.""" task = item match task: case UserActiveTask(): # Wedge the user active recorder into the bookmark thread because # it also wants to be done offline and low priority.
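# Keyed only by user_pk, repeated UserActiveTask pings collapse into a single cache entry, so a flood of profile hits costs one db write per flush.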
key = BookmarkKey(user_pk=item.pk) self.cache[key] = None case BookmarkUpdateTask(): key = BookmarkKey(item.auth_filter, item.comic_pks) if key not in self.cache: self.cache[key] = {} self.cache[key].update(item.updates) case _: self._process_task_immediately(task) @override def send_all_items(self) -> None: """Run the task method.""" if self.db_write_lock.locked(): self.log.debug("Database locked, waiting to process bookmarks.") return cleanup = set() for key, value in self.cache.items(): try: if key.user_pk: self.update_user_active(key.user_pk, logger) elif key.comic_pks: self.update_bookmarks(key.auth_filter, key.comic_pks, value) cleanup.add(key) except Exception: self.log.exception("Updating bookmarks") self.cleanup_cache(cleanup) ================================================ FILE: codex/librarian/bookmark/latest_version.py ================================================ """Fetch the current codex version.""" import json from datetime import timedelta from urllib.request import urlopen from django.utils import timezone from codex.librarian.scribe.janitor.status import JanitorCodexLatestVersionStatus from codex.librarian.scribe.janitor.tasks import JanitorCodexUpdateTask from codex.librarian.worker import WorkerStatusBase from codex.models import Timestamp from codex.version import PACKAGE_NAME _PYPI_URL_TEMPLATE = "https://pypi.python.org/pypi/%s/json" _REPO_URL = _PYPI_URL_TEMPLATE % PACKAGE_NAME _CACHE_EXPIRY = timedelta(days=1) - timedelta(minutes=5) _REPO_TIMEOUT = 5 class CodexLatestVersionUpdater(WorkerStatusBase): """Methods for fetching the latest version.""" @staticmethod def _fetch_latest_version(): """Fetch Latest Remotely.""" response = urlopen(_REPO_URL, timeout=_REPO_TIMEOUT) # noqa: S310 source = response.read() decoded_source = source.decode("utf-8") return json.loads(decoded_source)["info"]["version"] def update_latest_version(self, *, force: bool, update: bool = False) -> None: """Get the latest version from a remote repo using a cache.""" if self.db_write_lock.locked(): self.log.warning("Database locked, not retrieving latest codex version.") return status = JanitorCodexLatestVersionStatus() try: self.status_controller.start(status) ts = Timestamp.objects.get(key=Timestamp.Choices.CODEX_VERSION.value) do_fetch = ( force or not ts.version or (timezone.now() - ts.updated_at > _CACHE_EXPIRY) ) if do_fetch: latest_version = self._fetch_latest_version() if not latest_version: reason = "Bad latest version fetched." raise ValueError(reason) ts.version = latest_version ts.save() level = "INFO" log_txt = f"Saved new latest codex version {latest_version}." if update: task = JanitorCodexUpdateTask() self.librarian_queue.put(task) else: level = "DEBUG" log_txt = "Not fetching new latest version, not expired." 
self.log.log(level, log_txt) finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/bookmark/tasks.py ================================================ """Bookmark Tasks.""" from collections.abc import Mapping from dataclasses import dataclass from typing import Any from codex.librarian.tasks import LibrarianTask class BookmarkTask(LibrarianTask): """Bookmark Base Class.""" @dataclass class BookmarkUpdateTask(BookmarkTask): """Bookmark a page.""" auth_filter: Mapping[str, int | str | None] comic_pks: tuple[int, ...] updates: Mapping[str, Any] @dataclass class UserActiveTask(BookmarkTask): """Update the user's last active status.""" pk: int class ClearLibrarianStatusTask(BookmarkTask): """Clear all librarian statuses.""" @dataclass class CodexLatestVersionTask(BookmarkTask): """Get the latest version.""" force: bool = False ================================================ FILE: codex/librarian/bookmark/update.py ================================================ """Update bookmarks in bulk.""" from django.db.models.expressions import F from django.db.models.functions import Now from django.db.models.query import Q from codex.choices.notifications import Notifications from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import NotifierTask from codex.models import Bookmark, Comic from codex.views.auth import GroupACLMixin _BOOKMARK_UPDATE_FIELDS = frozenset( { "page", "finished", } ) class BookmarkUpdateMixin(GroupACLMixin): """Update Bookmarks.""" # Used by Bookmarkd and view.bookmark. @classmethod def _get_existing_bookmarks_for_update( cls, auth_filter, comic_pks, updates ) -> tuple: # Get existing bookmarks query_filter = Q(**auth_filter) & Q(comic__in=comic_pks) existing_bookmarks = Bookmark.objects.filter(query_filter) if updates.get("page") is not None: existing_bookmarks = existing_bookmarks.annotate( page_count=F("comic__page_count") ) update_fields = (set(updates.keys()) & _BOOKMARK_UPDATE_FIELDS) | {"updated_at"} only_fields = (*update_fields, "pk") existing_bookmarks = existing_bookmarks.only(*only_fields) return existing_bookmarks, update_fields @classmethod def _prepare_bookmark_updates(cls, existing_bookmarks, updates) -> list: # Prepare updates update_bookmarks = [] for bm in existing_bookmarks: cls._update_bookmarks_validate_page(bm, updates) for key, value in updates.items(): setattr(bm, key, value) bm.updated_at = Now() update_bookmarks.append(bm) return update_bookmarks @staticmethod def _update_bookmarks_validate_page(bm, updates) -> None: """Force bookmark page into valid range.""" page = updates.get("page") if page is None: return max_page = bm.page_count + 1 page = max(min(page, max_page), 0) if page == max_page: # Auto finish on bookmark last page bm.finished = True updates["page"] = page @staticmethod def _notify_library_changed(uid) -> None: """Notify one user that their library changed.""" group = f"user_{uid}" task = NotifierTask(Notifications.BOOKMARK.value, group) LIBRARIAN_QUEUE.put(task) @classmethod def _update_bookmarks(cls, auth_filter, comic_pks, updates) -> int: """Update existing bookmarks.""" count = 0 if not updates: return count existing_bookmarks, update_fields = cls._get_existing_bookmarks_for_update( auth_filter, comic_pks, updates ) update_bookmarks = cls._prepare_bookmark_updates(existing_bookmarks, updates) count = len(update_bookmarks) # Bulk update if count: Bookmark.objects.bulk_update(update_bookmarks,
tuple(update_fields)) return count @classmethod def _get_comics_without_bookmarks(cls, auth_filter, comic_pks): """Get comics without bookmarks.""" exclude = {} for key, value in auth_filter.items(): exclude["bookmark__" + key] = value query_filter = Q(pk__in=comic_pks) & ~Q(**exclude) return Comic.objects.filter(query_filter).only("pk") @classmethod def _prepare_bookmark_creates( cls, create_bookmark_comics, auth_filter, updates ) -> list: # Prepare creates create_bookmarks = [] for comic in create_bookmark_comics: bm = Bookmark(comic=comic, **auth_filter, **updates) create_bookmarks.append(bm) return create_bookmarks @classmethod def _create_bookmarks(cls, auth_filter, comic_pks, updates) -> int: """Create new bookmarks for comics that don't have bookmarks yet.""" count = 0 if not updates: return count create_bookmark_comics = cls._get_comics_without_bookmarks( auth_filter, comic_pks ) create_bookmarks = cls._prepare_bookmark_creates( create_bookmark_comics, auth_filter, updates ) count = len(create_bookmarks) # Bulk create if count: Bookmark.objects.bulk_create( create_bookmarks, update_conflicts=True, update_fields=tuple(_BOOKMARK_UPDATE_FIELDS), unique_fields=Bookmark._meta.unique_together[0], ) return count @classmethod def update_bookmarks(cls, auth_filter, comic_pks, updates) -> int: """Update a user bookmark.""" count = cls._update_bookmarks(auth_filter, comic_pks, updates) count += cls._create_bookmarks(auth_filter, comic_pks, updates) if count: uid = next(iter(auth_filter.values())) cls._notify_library_changed(uid) return count ================================================ FILE: codex/librarian/bookmark/user_active.py ================================================ """Mixin for recording user active entry.""" from datetime import timedelta from django.contrib.auth.models import User from django.utils import timezone as django_timezone from codex.models.admin import UserActive from codex.views.const import EPOCH_START class UserActiveMixin: """Record user active entry.""" # only hit the disk to record user activity every so often USER_ACTIVE_RESOLUTION = timedelta(hours=1) def init_user_active(self) -> None: """Init the last recorded dict.""" self._user_active_recorded = {} # pyright: ignore[reportUninitializedInstanceVariable] def update_user_active(self, pk: int, log) -> None: """Update user active.""" # Offline because profile gets hit rapidly in succession.
try: last_recorded = self._user_active_recorded.get(pk, EPOCH_START) now = django_timezone.now() if now - last_recorded > self.USER_ACTIVE_RESOLUTION: user = User.objects.get(pk=pk) UserActive.objects.update_or_create(user=user) self._user_active_recorded[pk] = now except User.DoesNotExist: pass except Exception as exc: reason = f"Update user activity: {exc}" log.warning(reason) ================================================ FILE: codex/librarian/covers/__init__.py ================================================ """Comic cover operations.""" ================================================ FILE: codex/librarian/covers/coverd.py ================================================ """Functions for dealing with comic cover thumbnails.""" from typing import override from codex.librarian.covers.purge import CoverPurgeThread from codex.librarian.covers.tasks import ( CoverCreateAllTask, CoverRemoveAllTask, CoverRemoveOrphansTask, CoverRemoveTask, CoverSaveToCache, ) class CoverThread(CoverPurgeThread): """Create comic covers in its own thread.""" @override def process_item(self, item) -> None: """Run the task method.""" task = item if isinstance(task, CoverSaveToCache): self.save_cover_to_cache(task.cover_path, task.data) elif isinstance(task, CoverRemoveAllTask): self.purge_all_comic_covers(self.librarian_queue) elif isinstance(task, CoverRemoveTask): self.purge_comic_covers(task.pks, custom=task.custom) elif isinstance(task, CoverRemoveOrphansTask): self.cleanup_orphan_covers() elif isinstance(task, CoverCreateAllTask): self.create_all_covers() else: self.log.error(f"Bad task sent to {self.__class__.__name__}: {task}") ================================================ FILE: codex/librarian/covers/create.py ================================================ """Create comic cover paths.""" from abc import ABC from io import BytesIO from multiprocessing.queues import Queue from pathlib import Path from time import time from comicbox.box import Comicbox from humanize import naturaldelta from PIL import Image from codex.librarian.covers.path import CoverPathMixin from codex.librarian.covers.status import CreateCoversStatus from codex.librarian.covers.tasks import CoverSaveToCache from codex.librarian.status import Status from codex.librarian.threads import QueuedThread from codex.models import Comic, CustomCover from codex.settings import COMICBOX_CONFIG _COVER_RATIO = 1.5372233400402415 # modal cover ratio THUMBNAIL_WIDTH = 165 THUMBNAIL_HEIGHT = round(THUMBNAIL_WIDTH * _COVER_RATIO) _THUMBNAIL_SIZE = (THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT) class CoverCreateThread(QueuedThread, CoverPathMixin, ABC): """Create methods for covers.""" @classmethod def _create_cover_thumbnail(cls, cover_image_data) -> BytesIO: """Isolate the save thumbnail function for leak detection.""" cover_thumb_buffer = BytesIO() with BytesIO(cover_image_data) as image_io: with Image.open(image_io) as cover_image: cover_image.thumbnail( _THUMBNAIL_SIZE, Image.Resampling.LANCZOS, reducing_gap=3.0, ) cover_image.save(cover_thumb_buffer, "WEBP", method=6) cover_image.close() # extra close for animated sequences return cover_thumb_buffer @classmethod def _get_comic_cover_image(cls, comic_path, log): """ Create comic cover if none exists. Return image thumb data or path to missing file thumb.
""" with Comicbox(comic_path, config=COMICBOX_CONFIG, logger=log) as car: image_data = car.get_cover_page(pdf_format="pixmap") if not image_data: reason = "Read empty cover" raise ValueError(reason) return image_data @classmethod def _get_custom_cover_image(cls, cover_path): """Get cover image from image file.""" with Path(cover_path).open("rb") as f: return f.read() @classmethod def create_cover_from_path( cls, pk: int, cover_path: str, log, librarian_queue: Queue, *, custom: bool ) -> BytesIO | None: """ Create cover for path. Called from views/cover. """ db_path = None try: model = CustomCover if custom else Comic db_path = model.objects.only("path").get(pk=pk).path if custom: cover_image = cls._get_custom_cover_image(db_path) else: cover_image = cls._get_comic_cover_image(db_path, log) thumb_buffer = cls._create_cover_thumbnail(cover_image) thumb_bytes = thumb_buffer.getvalue() thumb_buffer.seek(0) except Exception as exc: thumb_bytes = b"" thumb_buffer = None cover_str = db_path or f"{pk=}" log.warning(f"Could not create cover thumbnail for {cover_str}: {exc}") task = CoverSaveToCache(cover_path, thumb_bytes) librarian_queue.put(task) return thumb_buffer def save_cover_to_cache(self, cover_path_str: str, data) -> None: """Save cover thumb image to the disk cache.""" cover_path = Path(cover_path_str) cover_path.parent.mkdir(exist_ok=True, parents=True) if data: with cover_path.open("wb") as cover_file: cover_file.write(data) elif not cover_path.exists(): # zero length file is code for missing. cover_path.touch() def _bulk_create_comic_cover( self, pk: int, status: Status, *, custom: bool ) -> None: # Create one cover cover_path = self.get_cover_path(pk, custom=custom) if cover_path.exists(): status.decrement_total() else: # bulk create creates covers inline data = self.create_cover_from_path( pk, str(cover_path), self.log, self.librarian_queue, custom=custom, ) if data: data.close() status.increment_complete() self.status_controller.update(status) def _bulk_create_comic_covers(self, pks, *, custom: bool) -> int: """Create bulk comic covers.""" num_comics = len(pks) if not num_comics: return 0 status = CreateCoversStatus(0, num_comics) try: start_time = time() self.log.debug(f"Creating {num_comics} comic covers...") self.status_controller.start(status) for pk in pks: # Create all covers.
self._bulk_create_comic_cover(pk, status, custom=custom) desc = "custom" if custom else "comic" count = status.complete level = "INFO" if count else "DEBUG" elapsed = naturaldelta(time() - start_time) self.log.log(level, f"Created {count} {desc} covers in {elapsed}.") finally: self.status_controller.finish(status) return status.complete or 0 def create_all_covers(self) -> None: """Create all covers for all libraries.""" pks = CustomCover.objects.values_list("pk", flat=True) count = self._bulk_create_comic_covers(pks, custom=True) pks = Comic.objects.values_list("pk", flat=True) count += self._bulk_create_comic_covers(pks, custom=False) ================================================ FILE: codex/librarian/covers/path.py ================================================ """Cover Path functions.""" from pathlib import Path from codex.settings import ROOT_CACHE_PATH class CoverPathMixin: """Path methods for covers.""" COVERS_ROOT = ROOT_CACHE_PATH / "covers" CUSTOM_COVERS_ROOT = ROOT_CACHE_PATH / "custom-covers" _HEX_FILL = 8 _PATH_STEP = 2 _ZFILL = 12 @classmethod def _hex_path(cls, pk: int) -> Path: """Translate an integer into an efficient filesystem path.""" hex_str = format(pk, f"0{cls._HEX_FILL}x") parts = [ hex_str[i : i + cls._PATH_STEP] for i in range(0, len(hex_str), cls._PATH_STEP) ] return Path("/".join(parts)) @classmethod def get_cover_path(cls, pk: int, *, custom: bool): """Get cover path for comic pk.""" cover_path = cls._hex_path(pk) root = cls.CUSTOM_COVERS_ROOT if custom else cls.COVERS_ROOT return root / cover_path.with_suffix(".webp") @classmethod def get_cover_paths(cls, pks, *, custom: bool) -> set: """Get cover paths for many comic pks.""" cover_paths = set() for pk in pks: cover_path = cls.get_cover_path(pk, custom=custom) cover_paths.add(cover_path) return cover_paths ================================================ FILE: codex/librarian/covers/purge.py ================================================ """Purge comic covers.""" import shutil from abc import ABC from pathlib import Path from codex.librarian.covers.create import CoverCreateThread from codex.librarian.covers.status import FindOrphanCoversStatus, RemoveCoversStatus from codex.librarian.notifier.tasks import COVERS_CHANGED_TASK from codex.models import Comic from codex.models.paths import CustomCover class CoverPurgeThread(CoverCreateThread, ABC): """Cover Purge methods.""" _CLEANUP_STATUS_MAP = (FindOrphanCoversStatus, RemoveCoversStatus) @classmethod def _cleanup_cover_dirs(cls, path, cover_root) -> None: """Recursively remove empty cover directories.""" if not path or cover_root not in path.parents: return try: path.rmdir() cls._cleanup_cover_dirs(path.parent, cover_root) except OSError: pass def purge_cover_paths(self, cover_paths, cover_root) -> int: """Purge a set of cover paths.""" self.log.debug(f"Removing {len(cover_paths)} possible cover thumbnails...") status = RemoveCoversStatus(0, len(cover_paths)) try: self.status_controller.start(status) cover_dirs = set() for cover_path in cover_paths: try: cover_path.unlink() status.increment_complete() except FileNotFoundError: status.decrement_total() cover_dirs.add(cover_path.parent) self.status_controller.update(status, notify=False) for cover_dir in cover_dirs: self._cleanup_cover_dirs(cover_dir, cover_root) finally: self.status_controller.finish(status) return status.complete or 0 def purge_comic_covers(self, pks: frozenset[int], *, custom: bool) -> int: """Purge the cover paths for a set of comic pks.""" cover_paths = self.get_cover_paths(pks, custom=custom) cover_root
= self.CUSTOM_COVERS_ROOT if custom else self.COVERS_ROOT return self.purge_cover_paths(cover_paths, cover_root) def purge_all_comic_covers(self, librarian_queue) -> None: """Purge every comic cover.""" self.log.debug("Removing entire comic cover cache.") status = RemoveCoversStatus() try: self.status_controller.start(status) if self.COVERS_ROOT.exists(): shutil.rmtree(self.COVERS_ROOT) if self.CUSTOM_COVERS_ROOT.exists(): shutil.rmtree(self.CUSTOM_COVERS_ROOT) self.log.success("Removed entire comic cover cache and custom cover cache.") except OSError as exc: self.log.warning(exc) finally: self.status_controller.finish(status) librarian_queue.put(COVERS_CHANGED_TASK) def _cleanup_orphan_covers(self, cover_class, cover_root: Path, name: str) -> None: """Remove all orphan cover thumbs.""" status = FindOrphanCoversStatus() try: self.log.debug(f"Removing covers from missing {name}.") self.status_controller.start(status) pks = cover_class.objects.all().values_list("pk", flat=True) db_cover_paths = self.get_cover_paths(pks, custom=cover_class is CustomCover) orphan_cover_paths = set() for root, _, filenames in cover_root.walk(): root_path = Path(root) for fn in filenames: fs_cover_path = root_path / fn if fs_cover_path not in db_cover_paths: orphan_cover_paths.add(fs_cover_path) finally: self.status_controller.finish(status) self.purge_cover_paths(orphan_cover_paths, cover_root) def cleanup_orphan_covers(self) -> None: """Cleanup both comic and custom covers.""" self._cleanup_orphan_covers(Comic, self.COVERS_ROOT, "comics") self._cleanup_orphan_covers( CustomCover, self.CUSTOM_COVERS_ROOT, "custom covers" ) ================================================ FILE: codex/librarian/covers/status.py ================================================ """Cover status types.""" from abc import ABC from codex.librarian.status import Status class CoversStatus(Status, ABC): """Covers Status.""" ITEM_NAME = "covers" class CreateCoversStatus(CoversStatus): """Create Covers Status.""" CODE = "CCC" VERB = "Create" class RemoveCoversStatus(CoversStatus): """Purge Covers Status.""" CODE = "CRC" VERB = "Remove" log_success = True class FindOrphanCoversStatus(CoversStatus): """Find Orphan Covers Status.""" CODE = "CFO" ITEM_NAME = "orphan covers" VERB = "Find" _verbed = "Found" COVERS_STATII = ( CreateCoversStatus, RemoveCoversStatus, FindOrphanCoversStatus, ) ================================================ FILE: codex/librarian/covers/tasks.py ================================================ """Covers Tasks.""" from dataclasses import dataclass from codex.librarian.tasks import LibrarianTask @dataclass class CoverTask(LibrarianTask): """Handled by the CoverThread.""" @dataclass class CoverRemoveAllTask(CoverTask): """Remove all comic covers.""" @dataclass class CoverRemoveOrphansTask(CoverTask): """Clean up covers from missing comics.""" @dataclass class CoverRemoveTask(CoverTask): """Purge a set of comic covers.""" pks: frozenset custom: bool @dataclass class CoverSaveToCache(CoverTask): """Write cover to disk.""" cover_path: str data: bytes @dataclass class CoverCreateAllTask(CoverTask): """Create all comic covers.""" ================================================ FILE: codex/librarian/cron/__init__.py ================================================ """Crond thread.""" ================================================ FILE: codex/librarian/cron/crond.py ================================================ """Perform maintenance tasks.""" from threading import Condition, Event from time import sleep from types import MappingProxyType
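# Scheduling note: CronThread wakes at the earliest datetime returned by the functions in _TASK_TIME_FUNCTION_MAP below, so adding a nightly task only requires registering a (task class, time function) pair in that map.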
from typing import override from django.utils import timezone as django_timezone from codex.librarian.scribe.janitor.scheduled_time import get_janitor_time from codex.librarian.scribe.janitor.tasks import JanitorNightlyTask from codex.librarian.telemeter.scheduled_time import get_telemeter_time from codex.librarian.telemeter.tasks import TelemeterTask from codex.librarian.threads import NamedThread _TASK_TIME_FUNCTION_MAP = MappingProxyType( { JanitorNightlyTask: get_janitor_time, TelemeterTask: get_telemeter_time, } ) class CronThread(NamedThread): """Run nightly cleanups.""" def __init__(self, *args, **kwargs) -> None: """Initialize this thread with the worker.""" self._stop_event = Event() self._cond = Condition() self._task_times = () super().__init__(*args, daemon=True, **kwargs) def _create_task_times(self) -> None: task_times = {} for task_class, func in _TASK_TIME_FUNCTION_MAP.items(): if dttm := func(self.log): task_times[dttm] = task_class self._task_times = tuple(sorted(task_times.items())) def _get_timeout(self) -> int: if not self._task_times or not self._task_times[0]: self.log.warning("No scheduled jobs found. Not normal! Waiting a minute.") return 60 next_time = self._task_times[0][0] now = django_timezone.now() delta = next_time - now self.log.debug(f"Next scheduled job at {next_time} in {delta}.") return max(0, int(delta.total_seconds())) def _run_expired_jobs(self) -> None: now = django_timezone.now() for dttm, task_class in self._task_times: if dttm < now: self.librarian_queue.put(task_class()) else: # Times are always ordered so stop checking at the first future job. break @override def run(self) -> None: """Cron loop.""" try: self.run_start() with self._cond: while not self._stop_event.is_set(): self._run_expired_jobs() self._create_task_times() sleep(2) # try to fix double jobs timeout = self._get_timeout() self._cond.wait(timeout=timeout) if self._stop_event.is_set(): break sleep(2) # fix time rounding problems except Exception: self.log.exception(f"In {self.__class__.__name__}") self.log.debug(f"Stopped {self.__class__.__name__}.") def end_timeout(self) -> None: """End the timeout wait.""" with self._cond: self._cond.notify() @override def stop(self) -> None: """Stop the cron thread.""" super().stop() self._stop_event.set() self.end_timeout() ================================================ FILE: codex/librarian/fs/README.md ================================================ # File System Watcher & Poller Filesystem watching for Codex libraries using [watchfiles](https://github.com/samuelcolvin/watchfiles) (Rust `notify` backend). ## Architecture - **`watcher`** — A single thread watches all event-enabled library paths using `watchfiles.watch()`. Multiple paths go into one `watch()` call, so N libraries = 1 watcher thread. When library config changes, the watch is restarted with updated paths. - **`poller`** — Periodically compares database snapshots against disk snapshots to detect changes that filesystem events might miss (e.g. network mounts, Docker volumes). - **`event_batcherd.py`** — `WatcherEventBatcherThread`: Aggregates events from both the watcher and poller into batched `ImportTask` instances for the importer. 
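A minimal sketch of the single-watch, multi-path pattern described above (the paths and the handling here are illustrative, not Codex's actual watcher code):

```python
# One watchfiles.watch() call monitors every event-enabled library path.
from watchfiles import Change, watch

# Hypothetical library paths; Codex derives the real set from Library rows.
LIBRARY_PATHS = ("/comics/library-a", "/comics/library-b")

for changes in watch(*LIBRARY_PATHS):
    # Each iteration yields a batch: a set of (Change, path) tuples.
    for change, path in changes:
        # watchfiles reports only added/modified/deleted; Codex synthesizes
        # moved events separately (see fs/events.py).
        if change is Change.added:
            print("added:", path)
        elif change is Change.modified:
            print("modified:", path)
        else:
            print("deleted:", path)
```

Changing the library set means restarting the `watch()` call with new paths, which is why the watcher restarts when library config changes.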
================================================ FILE: codex/librarian/fs/__init__.py ================================================ """File system watching for Codex libraries using watchfiles.""" ================================================ FILE: codex/librarian/fs/event_batcherd.py ================================================ """ Batch filesystem events into bulk database import tasks. Events from both the watchfiles watcher and the database poller are aggregated here by library, deduplicated, and sent to the importer as ImportTask instances. """ from contextlib import suppress from copy import deepcopy from types import MappingProxyType from typing import override from codex.librarian.fs.events import ( FSChange, FSEvent, ) from codex.librarian.fs.poller.events import ( PollEvent, PollEventType, ) from codex.librarian.memory import get_mem_limit from codex.librarian.scribe.importer.tasks import ImportTask from codex.librarian.threads import AggregateMessageQueuedThread _IMPORT_TASK_PARAMS: MappingProxyType[str, int | set[int] | dict[str, str]] = ( MappingProxyType( { "dirs_moved": {}, "dirs_modified": set(), "dirs_deleted": set(), "files_moved": {}, "files_modified": set(), "files_added": set(), "files_deleted": set(), "covers_moved": {}, "covers_modified": set(), "covers_added": set(), "covers_deleted": set(), } ) ) class FSEventBatcherThread(AggregateMessageQueuedThread): """Batch filesystem events into bulk database import tasks.""" MAX_DELAY = 60.0 MAX_ITEMS_PER_GB = 50000 @staticmethod def create_import_task_args(library_id: int) -> dict: """Create import task args.""" args = deepcopy(dict(_IMPORT_TASK_PARAMS)) args["library_id"] = library_id return args @staticmethod def _remove_paths(args, deleted_key: str, moved_key: str) -> None: for src_path in args[deleted_key]: with suppress(KeyError): del args[moved_key][src_path] @classmethod def deduplicate_events(cls, args: dict) -> None: """Prune different event types on the same paths.""" # deleted cls._remove_paths(args, "dirs_deleted", "dirs_moved") cls._remove_paths(args, "files_deleted", "files_moved") # added args["files_added"] -= args["files_deleted"] files_dest_paths = set(args["files_moved"].values()) args["files_added"] -= files_dest_paths # modified args["dirs_modified"] -= args["dirs_deleted"] args["dirs_modified"] -= set(args["dirs_moved"].values()) args["files_modified"] -= args["files_added"] args["files_modified"] -= args["files_deleted"] args["files_modified"] -= files_dest_paths args["covers_modified"] -= args["covers_deleted"] args["covers_modified"] -= args["covers_added"] def __init__(self, *args, **kwargs) -> None: """Set the total items for limiting db ops per batch.""" super().__init__(*args, **kwargs) self._total_items = 0 mem_limit_gb = get_mem_limit("g") self.max_items = int(self.MAX_ITEMS_PER_GB * mem_limit_gb) def _args_field_by_event(self, library_id: int, event: FSEvent): """Translate an event into the corresponding import task field.""" if library_id not in self.cache: self.cache[library_id] = self.create_import_task_args(library_id) key = event.diff_key args_field = self.cache[library_id].get(key) if args_field is None: reason = f"Unhandled event, not batching: {event}" raise ValueError(reason) return args_field @override def aggregate_items(self, item) -> None: """Aggregate events into cache by library.""" event = item.event try: args_field = self._args_field_by_event(item.library_id, event) if event.change == FSChange.moved: args_field[event.src_path] = event.dest_path else: 
args_field.add(event.src_path) self._total_items += 1 except ValueError as exc: self.log.debug(exc) if self._total_items > self.max_items: reason = ( "Event batcher hit size limit." f" Sending batch of {self._total_items} to importer." ) self.log.info(reason) self.timed_out() def _set_check_metadata_mtime(self, item) -> None: pk = item.library_id if pk not in self.cache: self.cache[pk] = self.create_import_task_args(pk) poll_event: PollEvent = item.event self.cache[pk]["check_metadata_mtime"] = not poll_event.force def _start_poll(self, item) -> None: self.set_last_send() self._set_check_metadata_mtime(item) def _finish_poll(self, item) -> None: self._set_check_metadata_mtime(item) self.send_all_items() @override def process_item(self, item) -> None: event = item.event if isinstance(event, PollEvent): if event.poll_type == PollEventType.start: self._start_poll(item) elif event.poll_type == PollEventType.finish: self._finish_poll(item) else: super().process_item(item) def _subtract_args_items(self, args) -> None: total = 0 for value in args.values(): with suppress(TypeError): total += len(value) self._total_items = max(0, self._total_items - total) def _create_task(self, library_id) -> ImportTask: """Create a task from cached aggregated message data.""" args = self.cache.pop(library_id) self._subtract_args_items(args) self.deduplicate_events(args) args["files_created"] = args.pop("files_added") args["covers_created"] = args.pop("covers_added") return ImportTask(**args) def _send_import_task(self, library_id: int) -> None: task = self._create_task(library_id) if task.total(): self.librarian_queue.put(task) else: self.log.debug("Empty task after filtering. Not sending to importer.") @override def send_all_items(self) -> None: """Send all tasks to library queue and reset events cache.""" for library_id in tuple(self.cache): self._send_import_task(library_id) # reset the event aggregates self._total_items = 0 ================================================ FILE: codex/librarian/fs/events.py ================================================ """Codex filesystem event dataclasses.""" from dataclasses import dataclass from enum import IntEnum from watchfiles import Change class FSChange(IntEnum): """Extend watchfiles Change to include moved.""" added = Change.added modified = Change.modified deleted = Change.deleted moved = deleted + 1 @dataclass(frozen=True, slots=True) class FSEvent: """A filesystem change event.""" src_path: str change: FSChange is_directory: bool = False is_cover: bool = False dest_path: str = "" # Only populated for moved events @property def diff_key(self) -> str: """Return the ImportTask field name this event maps to.""" prefix = "covers" if self.is_cover else "dirs" if self.is_directory else "files" return f"{prefix}_{self.change.name}" ================================================ FILE: codex/librarian/fs/filters.py ================================================ """Filter files with regexes.""" import re from pathlib import Path from comicbox.box import Comicbox from loguru import logger from codex.models.paths import CustomCover from codex.settings import CUSTOM_COVERS_DIR, CUSTOM_COVERS_GROUP_DIRS _IMAGE_REGEX = r"\.(jpe?g|webp|png|gif|bmp)" _IMAGE_MATCHER: re.Pattern = re.compile(_IMAGE_REGEX, re.IGNORECASE) def _build_comic_matcher() -> re.Pattern: comic_regex = r"\.(cb[zt7" unsupported = [] if Comicbox.is_unrar_supported(): comic_regex += r"r" else: unsupported.append("CBR") comic_regex += r"]" if Comicbox.is_pdf_supported(): comic_regex += r"|pdf" else: 
unsupported.append("PDF") comic_regex += ")$" if unsupported: un_str = ", ".join(unsupported) logger.warning(f"Cannot detect or read from {un_str} archives") return re.compile(comic_regex, re.IGNORECASE) _COMIC_MATCHER: re.Pattern = _build_comic_matcher() def _match_suffix(pattern: re.Pattern, path: Path) -> bool: """Match suffix with pattern.""" return bool(path and path.suffix and pattern.match(path.suffix) is not None) def match_comic(path: Path) -> bool: """Match comic file.""" return _match_suffix(_COMIC_MATCHER, path) def match_image(path: Path) -> bool: """Match image file.""" return _match_suffix(_IMAGE_MATCHER, path) def match_folder_cover(path: Path) -> bool: """Match a folder cover image (e.g. cover.jpg next to comics).""" return path.stem == CustomCover.FOLDER_COVER_STEM and match_image(path) def match_group_cover_image(path: Path) -> bool: """Match a custom group cover image in the custom-covers directory.""" parent = path.parent return ( parent.parent == CUSTOM_COVERS_DIR and str(parent.name) in CUSTOM_COVERS_GROUP_DIRS and match_image(path) ) ================================================ FILE: codex/librarian/fs/poller/__init__.py ================================================ """Codex Filesystem Poller.""" ================================================ FILE: codex/librarian/fs/poller/events.py ================================================ """Poller handlers (snapshot diff: has moved events, dirs, full classification).""" from dataclasses import dataclass from enum import IntEnum class PollEventType(IntEnum): """Poll evenv type.""" start = 1 finish = 2 @dataclass(frozen=True, slots=True) class PollEvent: """Signal the event batcher about poll boundaries.""" poll_type: PollEventType force: bool = False ================================================ FILE: codex/librarian/fs/poller/poller.py ================================================ """Database polling for library changes.""" from pathlib import Path from threading import Condition, Event from typing import override from django.db.models.functions import Now from django.utils import timezone from humanize import naturaldelta from codex.librarian.fs.poller.events import PollEvent, PollEventType from codex.librarian.fs.poller.snapshot import DatabaseSnapshot, DiskSnapshot from codex.librarian.fs.poller.snapshot_diff import SnapshotDiff from codex.librarian.fs.poller.status import FSPollStatus from codex.librarian.fs.poller.tasks import FSPollLibrariesTask from codex.librarian.fs.tasks import FSEventTask from codex.librarian.threads import NamedThread from codex.librarian.worker import WorkerStatusMixin from codex.models import Library from codex.views.const import EPOCH_START DOCKER_UNMOUNTED_FN = "DOCKER_UNMOUNTED_VOLUME" _DIR_NOT_FOUND_TIMEOUT = 15 * 60 _LIBRARY_ONLY = ( "path", "poll", "poll_every", "last_poll", "update_in_progress", "covers_only", ) class LibraryPollerThread(NamedThread, WorkerStatusMixin): """Poll libraries on a schedule, comparing DB snapshots against disk.""" def __init__(self, *args, **kwargs) -> None: """Initialize the poller.""" super().__init__(*args, **kwargs) self.daemon = True self._cond = Condition() self._shutdown_event = Event() self._pending_poll_ids: frozenset[int] = frozenset() self._pending_force: bool = False ############################################# # Public interface - called from librariand # ############################################# def wake(self) -> None: """Wake up the poller after library config changes.""" with self._cond: self._cond.notify() def poll(self, 
task: FSPollLibrariesTask) -> None: """Trigger an immediate poll for specific libraries.""" with self._cond: self._pending_poll_ids = task.library_ids or frozenset( Library.objects.values_list("id", flat=True) ) self._pending_force = task.force self._cond.notify() @override def stop(self) -> None: """Signal the poller to shut down.""" super().stop() self._shutdown_event.set() with self._cond: self._cond.notify() ####################### # Timeout computation # ####################### def _get_poll_timeout(self, library: Library) -> float | None: # noqa: PLR0911 """ Compute seconds until this library's next scheduled poll. Returns None to wait forever (manual poll only). """ watch_path = Path(library.path) unmounted_marker = watch_path / DOCKER_UNMOUNTED_FN if not library.poll: self.log.info(f"Library {library.path} waiting for manual poll.") return None if not watch_path.is_dir(): self.log.warning(f"Library {library.path} not found. Not polling.") return _DIR_NOT_FOUND_TIMEOUT if unmounted_marker.exists(): warning = f"Library {library.path} looks like an unmounted docker volume. Not polling." self.log.warning(warning) return _DIR_NOT_FOUND_TIMEOUT if not tuple(watch_path.iterdir()): self.log.warning( f"{library.path} is empty. Suspect unmounted. Not polling." ) return _DIR_NOT_FOUND_TIMEOUT if library.update_in_progress: self.log.debug(f"Library {library.path} update in progress. Not polling.") return self._seconds_until_poll(library) if not library.last_poll: self.log.debug(f"First ever poll for {library.path}") return 0 return self._seconds_until_poll(library) @staticmethod def _seconds_until_poll(library: Library) -> float: """Seconds remaining until the next scheduled poll.""" last_poll = library.last_poll or EPOCH_START since_last = timezone.now() - last_poll return max(0, library.poll_every.total_seconds() - since_last.total_seconds()) ###################################### # Snapshot diff and event generation # ###################################### def _get_diff(self, library: Library, *, force: bool) -> SnapshotDiff | None: """Compute the diff between DB and disk for a library.""" covers_only = library.covers_only ignore_device = True db_snap = DatabaseSnapshot( library.path, self.log, covers_only=covers_only, ignore_device=ignore_device, force=force, ) disk_snap = DiskSnapshot( library.path, self.log, covers_only=covers_only, ignore_device=ignore_device ) if len(disk_snap.paths) <= 1: self.log.warning(f"{library.path} dir snapshot is empty. Not polling.") return None return SnapshotDiff(db_snap, disk_snap) def _queue_poll_events(self, library: Library, *, force: bool) -> None: """Run the snapshot diff and queue resulting events.""" diff = self._get_diff(library, force=force) if not diff or diff.is_empty(): self.log.debug(f"Nothing changed for {library.path}") return debug_log = ( f"Poller found: {len(diff.files_deleted)} deleted, " f"{len(diff.files_modified)} modified, " f"{len(diff.files_added)} added, " f"{len(diff.files_moved)} moved files. " f"{len(diff.dirs_deleted)} deleted, " f"{len(diff.dirs_modified)} modified, " f"{len(diff.dirs_moved)} moved dirs." 
) self.log.debug(debug_log) pk = library.pk # Signal batcher: poll starting start = FSEventTask(pk, PollEvent(PollEventType.start, force=force)) self.librarian_queue.put(start) # Send all diff events for event in diff.to_events(): task = FSEventTask(pk, event) self.librarian_queue.put(task) # Signal batcher: poll finished — flush the batch finish = FSEventTask(pk, PollEvent(PollEventType.finish, force=force)) self.librarian_queue.put(finish) def _poll_library(self, library: Library, *, force: bool) -> None: """Poll a single library with status tracking.""" if self.db_write_lock.locked(): self.log.warning(f"Database locked, not polling {library.path}.") return status = FSPollStatus(subtitle=library.path) try: self.status_controller.start(status) self.log.debug(f"Polling {library.path}...") self._queue_poll_events(library, force=force) library.last_poll = Now() library.save() except Exception: self.log.exception(f"Poll {library.path}") finally: self.status_controller.finish(status) ############# # Main loop # ############# def _get_min_timeout(self) -> float | None: """Find the shortest timeout across all polled libraries.""" min_timeout: float | None = None try: libraries = Library.objects.all().only(*_LIBRARY_ONLY) for library in libraries: try: timeout = self._get_poll_timeout(library) except FileNotFoundError: self.log.warning(f"Library {library.path} not found.") continue if timeout is None: continue if timeout == 0: return 0 if min_timeout is None or timeout < min_timeout: min_timeout = timeout except Exception: self.log.exception("Computing next poll timeout") return 60 # Retry in a minute on error return min_timeout def _poll_due_libraries(self, *, force: bool = False) -> None: """Poll all libraries that are due.""" try: libraries = Library.objects.all().only(*_LIBRARY_ONLY) for library in libraries: try: timeout = self._get_poll_timeout(library) except FileNotFoundError: continue if timeout is not None and timeout <= 0: self._poll_library(library, force=force) except Exception: self.log.exception("Polling due libraries") def _handle_pending_polls(self) -> None: """Handle manually triggered polls.""" poll_ids = self._pending_poll_ids force = self._pending_force self._pending_poll_ids = frozenset() self._pending_force = False try: qs = Library.objects.all() if poll_ids: qs = qs.filter(pk__in=poll_ids) qs.only(*_LIBRARY_ONLY) for library in qs: self._poll_library(library, force=force) except Exception: self.log.exception(f"Manual library poll {poll_ids}") @override def run(self) -> None: """Poller main loop.""" self.run_start() while not self._shutdown_event.is_set(): try: # Handle any pending manual polls first if self._pending_poll_ids: self._handle_pending_polls() # Find the next scheduled poll time timeout = self._get_min_timeout() if timeout is not None and timeout <= 0: self._poll_due_libraries() continue # Sleep until next poll or until woken if timeout is not None: self.log.debug(f"Next poll in {naturaldelta(timeout)}.") with self._cond: self._cond.wait(timeout) except Exception: self.log.exception(f"{self.__class__.__name__} loop error") self.log.debug(f"Stopped {self.__class__.__name__}") ================================================ FILE: codex/librarian/fs/poller/snapshot.py ================================================ """Filesystem and database snapshot classes for change detection.""" import os from collections.abc import Iterator from itertools import chain from pathlib import Path from stat import S_ISDIR from codex.librarian.fs.filters import ( match_comic, 
    match_folder_cover,
    match_group_cover_image,
)
from codex.models import Comic, CustomCover, FailedImport, Folder

IGNORE_ST_DEV = 0


class Snapshot:
    """Base snapshot: a mapping of paths to stat results."""

    def __init__(
        self, root: str, logger_, *, covers_only: bool, ignore_device: bool = True
    ) -> None:
        """Initialize empty snapshot."""
        self._root = root
        self.log = logger_
        self._covers_only = covers_only
        self._ignore_device = ignore_device
        self._stat_info: dict[str, os.stat_result] = {}
        self._device_inode_to_path: dict[tuple[int, int], str] = {}

    def _inode(self, st: os.stat_result) -> tuple[int, int]:
        """Build a (device, inode) key."""
        st_dev = IGNORE_ST_DEV if self._ignore_device else st.st_dev
        return (st_dev, st.st_ino)

    def _set_lookups(self, path: str, st: os.stat_result) -> None:
        """Populate the lookup dicts for a single path."""
        self._stat_info[path] = st
        inode_key = self._inode(st)
        self._device_inode_to_path[inode_key] = path

    @property
    def paths(self) -> frozenset[str]:
        """All known paths."""
        return frozenset(self._stat_info.keys())

    def inode(self, path: str) -> tuple[int, int]:
        """Return (device, inode) for a path."""
        st = self._stat_info[path]
        return self._inode(st)

    def path(self, st_lookup: tuple[int, int]) -> str | None:
        """Return the path for a (device, inode) pair."""
        return self._device_inode_to_path.get(st_lookup)

    def mtime(self, path: str) -> float:
        """Return mtime for a path."""
        return self._stat_info[path].st_mtime

    def size(self, path: str) -> int:
        """Return size for a path."""
        return self._stat_info[path].st_size

    def is_dir(self, path: str) -> bool:
        """Return whether path is a directory."""
        return S_ISDIR(self._stat_info[path].st_mode)

    def is_cover(self, path: str) -> bool:
        """Return whether path is a cover."""
        return self._covers_only or match_folder_cover(Path(path))


class DiskSnapshot(Snapshot):
    """Snapshot of the filesystem taken by walking a directory tree."""

    def __init__(
        self,
        root: str,
        logger_,
        *,
        covers_only: bool,
        recursive: bool = True,
        follow_symlinks: bool = True,
        ignore_device: bool = True,
    ) -> None:
        """Walk the directory and stat every entry."""
        super().__init__(
            root, logger_, covers_only=covers_only, ignore_device=ignore_device
        )
        self._recursive = recursive
        self._follow_symlinks = follow_symlinks
        self._init_walk()

    def _init_walk(self):
        """Walk disk fs tree."""
        root_stat = Path(self._root).stat()
        self._set_lookups(self._root, root_stat)
        self._walk(self._root)

    def _walk(self, root: str) -> None:
        """Walk the directory tree and populate lookups."""
        for entry in os.scandir(root):
            path = Path(entry.path)
            is_dir = entry.is_dir(follow_symlinks=self._follow_symlinks)
            if is_dir:
                if not self._recursive:
                    continue
            elif self._covers_only:
                if not match_group_cover_image(path):
                    continue
            elif not (match_comic(path) or match_folder_cover(path)):
                continue
            try:
                st = entry.stat(follow_symlinks=self._follow_symlinks)
            except OSError:
                continue
            self._set_lookups(entry.path, st)
            if is_dir:
                self._walk(entry.path)


class DatabaseSnapshot(Snapshot):
    """Snapshot of what the Codex database knows about a library's files."""

    _MODELS = (Folder, Comic, FailedImport, CustomCover)
    _COVERS_ONLY_MODELS = (CustomCover,)
    _STAT_LEN = 10

    def __init__(
        self,
        root: str,
        logger_,
        *,
        covers_only: bool = False,
        ignore_device: bool = True,
        force: bool = False,
    ) -> None:
        """Build snapshot from database records for the given library root."""
        super().__init__(
            root, logger_, covers_only=covers_only, ignore_device=ignore_device
        )
        self._force = force
        self._init_walk()

    def _init_walk(self):
        """Walk database fs
tree.""" root_path = Path(self._root) if not root_path.is_dir(): self.log.warning(f"{self._root} not found, cannot snapshot.") return # Add the library root itself root_stat = root_path.stat() self._set_lookups(self._root, root_stat) models = self._COVERS_ONLY_MODELS if self._covers_only else self._MODELS for wp in chain.from_iterable(self._walk(self._root, models)): st = self._create_stat(wp, force=self._force) self._set_lookups(wp["path"], st) @staticmethod def _walk(root: str, models: tuple) -> Iterator: """Yield querysets of {path, stat} dicts for each model.""" for model in models: yield ( model.objects.filter(library__path=root) .order_by("path") .values("path", "stat") ) def _create_stat(self, wp: dict, *, force: bool) -> os.stat_result: """Turn a database JSON stat array into an os.stat_result.""" stat = wp["stat"] if not stat or len(stat) != self._STAT_LEN or not stat[1]: path = Path(wp["path"]) if path.exists(): self.log.debug(f"Force modify path with missing db stat: {path}") stat = list(path.stat()) stat[8] = 0.0 # Fake mtime triggers modified event else: self.log.debug( f"Force delete missing path with missing db stat: {path}" ) stat = list(Comic.ZERO_STAT) if force: stat = list(stat) stat[8] = 0.0 # Fake mtime triggers modified event return os.stat_result(tuple(stat)) ================================================ FILE: codex/librarian/fs/poller/snapshot_diff.py ================================================ """ Compute the diff between two snapshots. Supports inode-based move detection and optional device-ignoring for Docker/complex filesystems. """ from dataclasses import dataclass from codex.librarian.fs.events import ( FSChange, FSEvent, ) from codex.librarian.fs.poller.snapshot import Snapshot _DIFF_FIELD_EVENT_MAP: tuple[tuple[str, FSChange, bool, bool], ...] = ( # diff_attr, change_type, is_directory, is_cover ("files_deleted", FSChange.deleted, False, False), ("files_modified", FSChange.modified, False, False), ("files_added", FSChange.added, False, False), ("covers_deleted", FSChange.deleted, False, True), ("covers_modified", FSChange.modified, False, True), ("covers_added", FSChange.added, False, True), ("dirs_deleted", FSChange.deleted, True, False), ("dirs_modified", FSChange.modified, True, False), ) _DIFF_MOVED_FIELD_EVENT_MAP: tuple[tuple[str, bool, bool], ...] 
= ( # diff_attr, is_directory, is_cover ("files_moved", False, False), ("covers_moved", False, True), ("dirs_moved", True, False), ) @dataclass class _DiffData: """Mutable working state for diff computation.""" ref: Snapshot snapshot: Snapshot added: set[str] deleted: set[str] modified: set[str] moved: set[tuple[str, str]] unchanged: frozenset[str] class SnapshotDiff: """Diff between a reference snapshot and a new snapshot.""" def _init_added(self, data: _DiffData, snapshot: Snapshot): for p in data.added: if snapshot.is_dir(p): self.dirs_added.append(p) elif snapshot.is_cover(p): self.covers_added.append(p) else: self.files_added.append(p) def _init_deleted(self, data: _DiffData, ref: Snapshot): for p in data.deleted: if ref.is_dir(p): self.dirs_deleted.append(p) elif ref.is_cover(p): self.covers_deleted.append(p) else: self.files_deleted.append(p) def _init_modified(self, data: _DiffData, snapshot: Snapshot): for p in data.modified: if snapshot.is_dir(p): self.dirs_modified.append(p) elif snapshot.is_cover(p): self.covers_modified.append(p) else: self.files_modified.append(p) def _init_moved(self, data: _DiffData, ref: Snapshot): for f, t in data.moved: if ref.is_dir(f): self.dirs_moved.append((f, t)) elif ref.is_cover(t): self.covers_moved.append((f, t)) else: self.files_moved.append((f, t)) def __init__( self, ref: Snapshot, snapshot: Snapshot, ) -> None: """Compute the diff between ref (old/database) and snapshot (new/disk).""" data = _DiffData( ref=ref, snapshot=snapshot, added=set(snapshot.paths - ref.paths), deleted=set(ref.paths - snapshot.paths), modified=set(), moved=set(), unchanged=frozenset(ref.paths & snapshot.paths), ) self._check_unchanged_for_inode_changes(data) self._find_moved_paths(data) self._find_modified_paths(data) self.dirs_added = [] self.covers_added = [] self.files_added = [] self._init_added(data, snapshot) self.dirs_deleted = [] self.covers_deleted = [] self.files_deleted = [] self._init_deleted(data, ref) self.dirs_modified = [] self.covers_modified = [] self.files_modified = [] self._init_modified(data, snapshot) self.dirs_moved = [] self.covers_moved = [] self.files_moved = [] self._init_moved(data, ref) def _is_inode_equal(self, data: _DiffData, path: str) -> bool: """Return whether inodes match between ref and snapshot.""" return data.ref.inode(path) == data.snapshot.inode(path) def _is_stats_equal(self, data: _DiffData, old_path: str, new_path: str) -> bool: """Return whether mtime and size match.""" return data.ref.mtime(old_path) == data.snapshot.mtime( new_path ) and data.ref.size(old_path) == data.snapshot.size(new_path) def _check_unchanged_for_inode_changes(self, data: _DiffData) -> None: """Check unchanged paths for inode changes (file replaced in-place).""" for path in data.unchanged: if not self._is_inode_equal(data, path): data.modified.add(path) def _find_moved_paths(self, data: _DiffData) -> None: """Detect moves by matching inodes between deleted and added sets.""" for old_path in tuple(data.deleted): inode = data.ref.inode(old_path) if new_path := data.snapshot.path(inode): data.deleted.remove(old_path) data.moved.add((old_path, new_path)) for new_path in tuple(data.added): inode = data.snapshot.inode(new_path) if old_path := data.ref.path(inode): data.added.remove(new_path) data.moved.add((old_path, new_path)) def _find_modified_paths(self, data: _DiffData) -> None: """Find paths with changed stats (mtime/size).""" for path in data.unchanged: if self._is_inode_equal(data, path) and not self._is_stats_equal( data, path, path ): 
                data.modified.add(path)
        for old_path, new_path in data.moved:
            if not self._is_stats_equal(data, old_path, new_path):
                data.modified.add(new_path)

    def is_empty(self) -> bool:
        """Return True if no changes were detected."""
        return not any(
            (
                self.files_added,
                self.files_deleted,
                self.files_modified,
                self.files_moved,
                self.dirs_added,
                self.dirs_deleted,
                self.dirs_modified,
                self.dirs_moved,
                self.covers_added,
                self.covers_deleted,
                self.covers_modified,
                self.covers_moved,
            )
        )

    def to_events(self) -> tuple[FSEvent, ...]:
        """Convert a SnapshotDiff into a sequence of FSEvents."""
        events: list[FSEvent] = []
        events.extend(
            [
                FSEvent(
                    src_path=src_path,
                    change=change,
                    is_directory=is_dir,
                    is_cover=is_cover,
                )
                for attr, change, is_dir, is_cover in _DIFF_FIELD_EVENT_MAP
                for src_path in getattr(self, attr)
            ]
        )
        events.extend(
            [
                FSEvent(
                    src_path=src_path,
                    change=FSChange.moved,
                    is_directory=is_dir,
                    is_cover=is_cover,
                    dest_path=dest_path,
                )
                for attr, is_dir, is_cover in _DIFF_MOVED_FIELD_EVENT_MAP
                for src_path, dest_path in getattr(self, attr)
            ]
        )
        return tuple(events)


================================================
FILE: codex/librarian/fs/poller/status.py
================================================
"""Poller Statii."""

from codex.librarian.fs.status import FSStatus


class FSPollStatus(FSStatus):
    """FS Poll Status."""

    CODE = "WPO"
    VERB = "Poll"
    _verbed = "Polled"
    ITEM_NAME = "library"
    SINGLE = True


FS_STATII = (FSPollStatus,)


================================================
FILE: codex/librarian/fs/poller/tasks.py
================================================
"""Poller Tasks."""

from dataclasses import dataclass

from codex.librarian.fs.tasks import FSTask


@dataclass
class FSPollLibrariesTask(FSTask):
    """Tell poller to poll these libraries now."""

    library_ids: frozenset
    force: bool


================================================
FILE: codex/librarian/fs/status.py
================================================
"""Filesystem Statii."""

from abc import ABC

from codex.librarian.status import Status


class FSStatus(Status, ABC):
    """File System Statii."""


================================================
FILE: codex/librarian/fs/tasks.py
================================================
"""Filesystem Tasks."""

from dataclasses import dataclass

from codex.librarian.fs.events import FSEvent
from codex.librarian.fs.poller.events import PollEvent
from codex.librarian.tasks import LibrarianTask


@dataclass
class FSTask(LibrarianTask):
    """Filesystem tasks."""


@dataclass
class FSEventTask(FSTask):
    """Task for filesystem events and poll start and stop events."""

    library_id: int
    event: FSEvent | PollEvent


================================================
FILE: codex/librarian/fs/watcher/__init__.py
================================================
"""File System Watcher."""


================================================
FILE: codex/librarian/fs/watcher/data.py
================================================
"""Dataclass for post-processing batches of watchfiles changes."""

from dataclasses import dataclass, field

from codex.librarian.fs.events import FSEvent


@dataclass
class ChangeBatch:
    """Accumulated changes from a single watchfiles batch."""

    # (library_pk, FSEvent) pairs, grouped by change type
    added: list[tuple[int, FSEvent]] = field(default_factory=list)
    deleted: list[tuple[int, FSEvent]] = field(default_factory=list)
    modified: list[tuple[int, FSEvent]] = field(default_factory=list)
    # Dir deletes expanded from DB, kept separate so they bypass move matching
    dir_deleted: list[tuple[int, FSEvent]] = field(default_factory=list)
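

# Illustrative sketch (not part of the module): how a batch flows through the
# watcher pipeline. detect_moves and process_changes live in
# codex.librarian.fs.watcher.move and codex.librarian.fs.watcher.events.
#
#   batch = ChangeBatch()
#   batch.deleted.append((library_pk, FSEvent(src_path="/lib/old.cbz", change=FSChange.deleted)))
#   batch.added.append((library_pk, FSEvent(src_path="/lib/new.cbz", change=FSChange.added)))
#   moves = detect_moves(batch)  # inode matching may collapse the pair into one moved event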
================================================ FILE: codex/librarian/fs/watcher/dirs.py ================================================ """Add missing events for directories.""" import os from pathlib import Path from loguru import logger from codex.librarian.fs.events import FSChange, FSEvent from codex.librarian.fs.filters import ( match_comic, match_folder_cover, match_group_cover_image, ) from codex.librarian.fs.watcher.data import ChangeBatch from codex.models.comic import Comic from codex.models.groups import Folder from codex.models.paths import CustomCover, FailedImport def _classify_added_file(path: Path, *, covers_only: bool) -> FSEvent | None: """Return an added FSEvent if the path is a relevant file, else None.""" if covers_only: if match_group_cover_image(path): return FSEvent(src_path=str(path), change=FSChange.added, is_cover=True) return None if match_comic(path): return FSEvent(src_path=str(path), change=FSChange.added) if match_folder_cover(path): return FSEvent(src_path=str(path), change=FSChange.added, is_cover=True) return None def expand_dir_added( dir_path: str, library_pk: int, batch: ChangeBatch, *, covers_only: bool, ) -> None: """Walk a newly added directory and add child events to the batch.""" root = Path(dir_path) if not root.is_dir(): return count = 0 for dirpath, _dirnames, filenames in os.walk(root): for filename in filenames: file_path = Path(dirpath) / filename if event := _classify_added_file(file_path, covers_only=covers_only): batch.added.append((library_pk, event)) count += 1 if count: logger.debug(f"Expanded dir added {dir_path} -> {count} child events") def expand_dir_deleted(dir_path: str, library_pk: int, batch: ChangeBatch) -> None: """Query the DB for paths under a deleted directory and add events to the batch.""" # The directory itself batch.dir_deleted.append( ( library_pk, FSEvent(src_path=dir_path, change=FSChange.deleted, is_directory=True), ) ) # Child folders child_folder_paths = Folder.objects.filter( library_id=library_pk, path__startswith=dir_path ).values_list("path", flat=True) for path in child_folder_paths: if path != dir_path: batch.dir_deleted.append( ( library_pk, FSEvent(src_path=path, change=FSChange.deleted, is_directory=True), ) ) # Child comics and failed imports for model in (Comic, FailedImport): child_paths = model.objects.filter( library_id=library_pk, path__startswith=dir_path ).values_list("path", flat=True) for path in child_paths: batch.deleted.append( (library_pk, FSEvent(src_path=path, change=FSChange.deleted)) ) # Child custom covers cover_paths = CustomCover.objects.filter( library_id=library_pk, path__startswith=dir_path ).values_list("path", flat=True) for path in cover_paths: batch.deleted.append( ( library_pk, FSEvent(src_path=path, change=FSChange.deleted, is_cover=True), ) ) ================================================ FILE: codex/librarian/fs/watcher/events.py ================================================ """ Process raw watchfiles changes into rich FSEvents. Handles three capabilities that raw watchfiles doesn't provide: 1. Directory added -> walk recursively and emit child file events 2. Directory deleted -> query DB for orphaned children to delete 3. 
Move detection -> match delete+add pairs by inode within a batch
"""

from pathlib import Path

from watchfiles import Change

from codex.librarian.fs.events import FSChange, FSEvent
from codex.librarian.fs.filters import match_folder_cover
from codex.librarian.fs.watcher.data import ChangeBatch
from codex.librarian.fs.watcher.dirs import expand_dir_added, expand_dir_deleted
from codex.librarian.fs.watcher.move import detect_moves
from codex.models.groups import Folder


def _process_change(
    change_enum: Change,
    path: str,
    library_pk: int,
    batch: ChangeBatch,
    *,
    covers_only: bool,
) -> None:
    """Classify a single raw change and append to the batch."""
    p = Path(path)
    is_cover = covers_only or match_folder_cover(p)
    if change_enum == Change.added:
        if p.is_dir():
            expand_dir_added(path, library_pk, batch, covers_only=is_cover)
        else:
            batch.added.append(
                (
                    library_pk,
                    FSEvent(src_path=path, change=FSChange.added, is_cover=is_cover),
                )
            )
    elif change_enum == Change.deleted:
        is_known_dir = Folder.objects.filter(library_id=library_pk, path=path).exists()
        if is_known_dir:
            expand_dir_deleted(path, library_pk, batch)
        else:
            batch.deleted.append(
                (
                    library_pk,
                    FSEvent(src_path=path, change=FSChange.deleted, is_cover=is_cover),
                )
            )
    elif change_enum == Change.modified:
        is_dir = p.is_dir()
        batch.modified.append(
            (
                library_pk,
                FSEvent(
                    src_path=path,
                    change=FSChange.modified,
                    is_directory=is_dir,
                    is_cover=is_cover,
                ),
            )
        )


def _find_library(
    library_paths: dict[str, int], covers_only_paths: set[str], file_path: str
) -> tuple[int, bool] | None:
    """Find which library a changed path belongs to."""
    for lib_path, pk in library_paths.items():
        if file_path.startswith(lib_path):
            return pk, lib_path in covers_only_paths
    return None


def process_changes(
    changes: set[tuple[Change, str]],
    library_paths: dict[str, int],
    covers_only_paths: set[str],
) -> list[tuple[int, FSEvent]]:
    """
    Process a batch of raw watchfiles changes into (library_pk, FSEvent) pairs.

    _find_library is called exactly once per raw change. All downstream
    processing uses the library_pk stored in the batch.
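
    The returned list is ordered: move events first, then remaining adds and
    deletes, expanded dir deletes, and modifications.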
""" batch = ChangeBatch() # Single pass: classify each raw change, calling library_lookup once each for change_enum, path in changes: result = _find_library(library_paths, covers_only_paths, path) if not result: continue library_pk, covers_only = result _process_change(change_enum, path, library_pk, batch, covers_only=covers_only) # Detect moves (mutates batch.added and batch.deleted in place) move_events = detect_moves(batch) # Assemble output: moves + remaining adds + remaining deletes + dir deletes + modified output: list[tuple[int, FSEvent]] = [] output.extend(move_events) output.extend(batch.added) output.extend(batch.deleted) output.extend(batch.dir_deleted) output.extend(batch.modified) return output ================================================ FILE: codex/librarian/fs/watcher/move.py ================================================ """Watchfiles Move detection.""" from pathlib import Path from loguru import logger from codex.librarian.fs.events import FSChange, FSEvent from codex.librarian.fs.watcher.data import ChangeBatch from codex.models.comic import Comic from codex.models.groups import Folder from codex.models.paths import CustomCover # stat field index for inode _INODE_INDEX = 1 def _model_for_event(event: FSEvent): """Return the single Django model to query for this event's inode.""" if event.is_cover: return CustomCover if event.is_directory: return Folder return Comic def _get_db_inode(event: FSEvent, library_pk: int) -> int | None: """Look up the inode for a path from the database stat field.""" model = _model_for_event(event) stat = ( model.objects.filter(library_id=library_pk, path=event.src_path) .values_list("stat", flat=True) .first() ) if stat and len(stat) > _INODE_INDEX and stat[_INODE_INDEX]: return stat[_INODE_INDEX] return None def _get_disk_inode(path: str) -> int | None: """Stat a path on disk and return its inode, or None.""" try: p = Path(path) return p.stat().st_ino except OSError: return None def _detect_one_move( add_idx: int, add_value: tuple[int, FSEvent], deleted_by_inode, move_events, matched_added, matched_deleted, ) -> None: add_lib_pk, add_event = add_value disk_inode = _get_disk_inode(add_event.src_path) if not disk_inode: return match = deleted_by_inode.get(disk_inode) if not match: return del_idx, del_lib_pk, del_event = match # Only match within the same library if add_lib_pk != del_lib_pk: return is_dir = Path(add_event.src_path).is_dir() is_cover = add_event.is_cover or del_event.is_cover move_events.append( ( add_lib_pk, FSEvent( src_path=del_event.src_path, change=FSChange.moved, dest_path=add_event.src_path, is_directory=is_dir, is_cover=is_cover, ), ) ) matched_added.add(add_idx) matched_deleted.add(del_idx) del deleted_by_inode[disk_inode] def detect_moves(batch: ChangeBatch) -> list[tuple[int, FSEvent]]: """ Match deleted+added pairs by inode to detect moves. Returns move events. Matched FSEvents are removed from batch.added and batch.deleted in place. 
""" # Build inode -> (index, library_pk, event) from deleted list deleted_by_inode: dict[int, tuple[int, int, FSEvent]] = {} for idx, (lib_pk, event) in enumerate(batch.deleted): inode = _get_db_inode(event, lib_pk) if inode: deleted_by_inode[inode] = (idx, lib_pk, event) if not deleted_by_inode: return [] move_events: list[tuple[int, FSEvent]] = [] matched_added: set[int] = set() # indices into batch.added matched_deleted: set[int] = set() # indices into batch.deleted for add_idx, add_val in enumerate(batch.added): _detect_one_move( add_idx, add_val, deleted_by_inode, move_events, matched_added, matched_deleted, ) # Remove matched entries from added and deleted (reverse order to keep indices valid) batch.added = [ pair for idx, pair in enumerate(batch.added) if idx not in matched_added ] batch.deleted = [ pair for idx, pair in enumerate(batch.deleted) if idx not in matched_deleted ] if move_events: logger.debug(f"Detected {len(move_events)} move(s) from inode matching") return move_events ================================================ FILE: codex/librarian/fs/watcher/status.py ================================================ """Watcher Statii.""" from codex.librarian.fs.status import FSStatus class FSWatcherRestartStatus(FSStatus): """FS Watcher Restart Status.""" CODE = "WRS" VERB = "Restart" _verbed = "Restarted" ITEM_NAME = "file watcher" SINGLE = True log_success = True WATCHER_STATII = (FSWatcherRestartStatus,) ================================================ FILE: codex/librarian/fs/watcher/tasks.py ================================================ """Restart Watcher to sync with database.""" from dataclasses import dataclass from codex.librarian.fs.tasks import FSTask @dataclass class FSWatcherRestartTask(FSTask): """Restart the Watcher.""" ================================================ FILE: codex/librarian/fs/watcher/watcher.py ================================================ """Filesystem watcher using watchfiles.""" from pathlib import Path from threading import Event from time import sleep from typing import override from watchfiles import Change, watch from codex.librarian.fs.filters import ( match_comic, match_folder_cover, match_group_cover_image, ) from codex.librarian.fs.tasks import FSEventTask from codex.librarian.fs.watcher.events import process_changes from codex.librarian.fs.watcher.status import FSWatcherRestartStatus from codex.librarian.threads import NamedThread from codex.models import Library _MAX_PATH_WATCH_RETRIES = 1 class CodexWatchFilter: """Watchfiles watcher class for both types of library.""" def __init__(self, covers_only_paths: set[str]): """Set covers_only_paths.""" self._covers_only_paths = covers_only_paths def __call__(self, change: Change, path: str) -> bool: """ Filter method. Deleted paths can't be inspected on disk, so let them all through; event processing filters by DB lookup and suffix matching instead. 
""" if change == Change.deleted: return True ppath = Path(path) covers_only = False for covers_only_path in self._covers_only_paths: if ppath.is_relative_to(covers_only_path): covers_only = True break if covers_only: return match_group_cover_image(ppath) return ppath.is_dir() or match_comic(ppath) or match_folder_cover(ppath) class LibraryWatcherThread(NamedThread): """Watch all event-enabled library paths for filesystem changes.""" def __init__(self, *args, **kwargs) -> None: """Initialize the watcher.""" super().__init__(*args, **kwargs) self.daemon = True self._library_paths: dict[str, int] = {} # path to library_pk self._covers_only_paths: set[str] = set() self._restart_event = Event() self._shutdown_event = Event() def _log_update_paths_from_db(self, new_paths_dict: dict[str, int]): old_paths = frozenset(self._library_paths.keys()) new_paths = frozenset(new_paths_dict.keys()) if old_paths == new_paths: return added = new_paths - old_paths removed = old_paths - new_paths if added: self.log.info(f"FS adding paths: {added}") if removed: self.log.info(f"FS removing paths: {removed}") def _update_paths_from_db(self) -> None: """Query the database for current library paths to watch.""" new_paths_dict: dict[str, int] = {} new_covers: set[str] = set() try: libraries = ( Library.objects.filter(events=True) .all() .only("pk", "path", "covers_only") ) for library in libraries: try: new_paths_dict[library.path] = library.pk if library.covers_only: new_covers.add(library.path) except Exception: self.log.exception(f"Processing library {library.pk}") except Exception: self.log.exception("Querying libraries for watcher") return self._log_update_paths_from_db(new_paths_dict) self._library_paths = new_paths_dict self._covers_only_paths = new_covers ############################################# # Public interface - called from librariand # ############################################# def restart(self) -> None: """Update watched paths from the database.""" status = FSWatcherRestartStatus() try: self.status_controller.start(status) self._update_paths_from_db() self._restart_event.set() finally: self.status_controller.finish(status) ############# # Main loop # ############# def _process_changes(self, changes: set[tuple[Change, str]]) -> None: """Route watchfiles changes through processing to the librarian queue.""" # Watchfiles does not expand events for added or removed directories or do move detection # So handle this myself. 
for library_pk, event in process_changes( changes, self._library_paths, self._covers_only_paths ): task = FSEventTask(library_pk, event) self.librarian_queue.put(task) def _get_extant_paths(self, paths: list[str]) -> list[str]: extant_paths = [] for path in paths: retry = 0 while retry <= _MAX_PATH_WATCH_RETRIES: if Path(path).is_dir(): extant_paths.append(path) level = "INFO" if retry else "DEBUG" self.log.log(level, f"Watching {path}") break self.log.warning(f"Waiting 5 seconds for {path} to appear...") sleep(5) retry += 1 else: self.log.warning(f"{path} does not seem to exist, not watching.") plural = "s" if len(extant_paths) != 1 else "" self.log.info(f"Watching {len(extant_paths)} library path{plural} for events.") return extant_paths def _watch_loop(self) -> None: """Run the watchfiles loop, restarting when paths change.""" watch_filter = CodexWatchFilter(self._covers_only_paths) while not self._shutdown_event.is_set(): self._restart_event.clear() paths = list(self._library_paths.keys()) if not paths: # No paths to watch — wait for a sync signal self._restart_event.wait(timeout=5.0) continue extant_paths = self._get_extant_paths(paths) try: for changes in watch( *extant_paths, stop_event=self._restart_event, recursive=True, watch_filter=watch_filter, ): if self._shutdown_event.is_set(): return self._process_changes(changes) except FileNotFoundError as exc: self.log.warning(f"Watch path disappeared: {exc}") except Exception: self.log.exception("FS error") @override def run(self) -> None: """Thread entry point.""" self.run_start() self._update_paths_from_db() self._watch_loop() self.log.debug(f"Stopped {self.__class__.__name__}") @override def stop(self) -> None: """Signal the watcher to shut down.""" super().stop() self._shutdown_event.set() self._restart_event.set() # Unblock if waiting ================================================ FILE: codex/librarian/librariand.py ================================================ """Library process worker for background tasks.""" from copy import copy from multiprocessing import Process, Queue from threading import Lock, active_count from types import MappingProxyType from typing import Any, NamedTuple, override from caseconverter import snakecase from django.db import close_old_connections from codex.librarian.bookmark.bookmarkd import BookmarkThread # typos:ignore from codex.librarian.bookmark.tasks import BookmarkTask from codex.librarian.covers.coverd import ( # codespell:ignore coverd, typos:ignore CoverThread, ) from codex.librarian.covers.tasks import CoverTask from codex.librarian.cron.crond import CronThread from codex.librarian.fs.event_batcherd import FSEventBatcherThread from codex.librarian.fs.poller.poller import LibraryPollerThread from codex.librarian.fs.poller.tasks import FSPollLibrariesTask from codex.librarian.fs.tasks import FSEventTask from codex.librarian.fs.watcher.tasks import FSWatcherRestartTask from codex.librarian.fs.watcher.watcher import LibraryWatcherThread from codex.librarian.notifier.notifierd import NotifierThread from codex.librarian.notifier.tasks import NotifierTask from codex.librarian.restarter.restarter import CodexRestarter from codex.librarian.restarter.tasks import CodexRestarterTask from codex.librarian.scribe.janitor.tasks import JanitorAdoptOrphanFoldersTask from codex.librarian.scribe.scribed import ScribeThread from codex.librarian.scribe.search.tasks import SearchIndexSyncTask from codex.librarian.scribe.tasks import ScribeTask from codex.librarian.status_controller import StatusController from 
codex.librarian.tasks import LibrarianShutdownTask, LibrarianTask, WakeCronTask from codex.librarian.threads import NamedThread _THREAD_CLASSES: tuple[type[NamedThread], ...] = ( BookmarkThread, CoverThread, CronThread, LibraryWatcherThread, LibraryPollerThread, NotifierThread, ScribeThread, FSEventBatcherThread, ) _THREAD_CLASS_MAP: MappingProxyType[str, type[NamedThread]] = MappingProxyType( {snakecase(thread_class.__name__): thread_class for thread_class in _THREAD_CLASSES} ) LibrarianThreads = NamedTuple("LibrarianThreads", tuple(_THREAD_CLASS_MAP.items())) # ty: ignore[invalid-named-tuple] _THREAD_QUEUE_TASK_MAP: dict[type, str] = { CoverTask: "cover_thread", BookmarkTask: "bookmark_thread", NotifierTask: "notifier_thread", FSEventTask: "fsevent_batcher_thread", } class LibrarianDaemon(Process): """Librarian Process.""" def __init__(self, logger_, queue: Queue, broadcast_queue: Queue) -> None: """Init process.""" self.log = logger_ name = self.__class__.__name__ super().__init__(name=name, daemon=False) self.queue = queue self.broadcast_queue = broadcast_queue self.status_controller = StatusController(logger_, queue) startup_tasks = ( JanitorAdoptOrphanFoldersTask(), SearchIndexSyncTask(), ) for task in startup_tasks: self.queue.put(task) self.run_loop = True self._reversed_threads: tuple[NamedThread, ...] = () def _restart_fs_watcher(self) -> None: self._threads.library_watcher_thread.restart() def _restart_codex(self, task: LibrarianTask) -> None: restarter = CodexRestarter(self.log, self.queue, self.db_write_lock) restarter.handle_task(task) def _process_task(self, task) -> None: """Process an individual task popped off the queue.""" # Simply requeue tasks to the handler thread. for task_type, thread_attr in _THREAD_QUEUE_TASK_MAP.items(): if isinstance(task, task_type): getattr(self._threads, thread_attr).queue.put(task) return match task: case ScribeTask(): # Special put method does queue put preprocessing. self._threads.scribe_thread.put(task) case FSWatcherRestartTask(): self._restart_fs_watcher() case FSPollLibrariesTask(): self._threads.library_poller_thread.poll(task) case WakeCronTask(): self._threads.cron_thread.end_timeout() case CodexRestarterTask(): self._restart_codex(task) case LibrarianShutdownTask(): self.log.info(f"Shutting down {self.name}...") self.run_loop = False case _: self.log.warning(f"Unhandled Librarian task: {task}") def _create_threads(self) -> None: """Create all the threads.""" self.log.debug("Creating Librarian threads...") self.log.debug(f"Active threads before thread creation: {active_count()}") self.db_write_lock = Lock() # pyright: ignore[reportUninitializedInstanceVariable] threads = {} kwargs: dict[str, Any] = {} for name, thread_class in _THREAD_CLASS_MAP.items(): thread_kwargs = copy(kwargs) if thread_class == NotifierThread: thread_kwargs["broadcast_queue"] = self.broadcast_queue thread = thread_class( self.log, self.queue, self.db_write_lock, **thread_kwargs ) threads[name] = thread self.log.debug(f"Created {name} thread.") self._threads = LibrarianThreads(**threads) # pyright: ignore[reportUninitializedInstanceVariable] self.log.debug("Threads created") def _start_threads(self) -> None: """Start all librarian's threads.""" self.log.debug(f"{self.name} starting all threads.") for thread in self._threads: thread.start() self.log.info(f"{self.name} started all threads.") def _startup(self) -> None: """Initialize threads.""" self.log.debug(f"Started {self.name}.") # Janitor created in init. self._create_threads() # can't do this in init. 
        self._start_threads()
        self.log.success(f"{self.name} ready for tasks.")

    def _stop_threads(self) -> None:
        """Stop all librarian's threads."""
        self.log.debug(f"{self.name} stopping all threads...")
        for thread in self._reversed_threads:
            thread.stop()
        self.log.debug(f"{self.name} stopped all threads.")

    def _join_threads(self) -> None:
        """Join all librarian threads."""
        self.log.debug(f"{self.name} joining all threads...")
        for thread in self._reversed_threads:
            thread.join()
        self.log.info(f"{self.name} joined all threads.")

    def _shutdown(self) -> None:
        """Shutdown threads and queues."""
        self._reversed_threads = tuple(reversed(self._threads))  # ty: ignore[invalid-assignment]
        self._stop_threads()
        self._join_threads()
        while not self.queue.empty():
            self.queue.get_nowait()
        self.queue.close()
        self.queue.join_thread()
        self.log.success(f"{self.name} finished.")

    @override
    def run(self) -> None:
        """
        Process tasks from the queue.

        This process also runs the crond thread and the filesystem watcher
        threads.
        """
        self._startup()
        try:
            while self.run_loop:
                try:
                    task = self.queue.get()
                    close_old_connections()
                    self._process_task(task)
                except Exception:
                    self.log.exception(f"In {self.name} loop")
        except Exception:
            self.log.exception(f"{self.name} crashed.")
        except KeyboardInterrupt:
            self.log.debug(f"{self.name} Keyboard interrupt")
        finally:
            self._shutdown()

    def stop(self) -> None:
        """Close up the librarian process."""
        self.queue.put(LibrarianShutdownTask())
        self.queue.close()
        self.queue.join_thread()
        self.join()
        self.close()


================================================
FILE: codex/librarian/memory.py
================================================
"""Detect how much memory we're working with."""

import resource
from contextlib import suppress
from pathlib import Path

from psutil import virtual_memory

# cgroups1
MEMORY_STAT_PATH = "/sys/fs/cgroup/memory/memory.stat"
# cgroups2
MEMORY_MAX_PATH = "/sys/fs/cgroup/memory.max"
DIVISORS = {"b": 1, "k": 1024, "m": 1024**2, "g": 1024**3}


def _get_cgroups2_mem_limit() -> int:
    """Get mem limit from cgroups2."""
    with Path(MEMORY_MAX_PATH).open("r") as limit:
        return int(limit.read())


def _get_cgroups1_mem_limit() -> int | None:
    """Get mem limit from cgroups1."""
    with Path(MEMORY_STAT_PATH).open("r") as mem_stat_file:
        for line in mem_stat_file:
            parts = line.split()
            if not parts or len(parts) < 2:  # noqa: PLR2004
                continue
            if "hierarchical_memory_limit" in parts[0]:
                return int(parts[1])
    return None


def get_mem_limit(divisor="b"):
    """
    Get the current memlimit.

    If we're in a container, set the limit too.
    """
    mem_limit = None
    api_funcs = (_get_cgroups2_mem_limit, _get_cgroups1_mem_limit)
    for func in api_funcs:
        with suppress(Exception):
            mem_limit = func()
            if mem_limit:
                break
    if mem_limit:
        # Set the process memlimit.
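        # RLIMIT_AS caps this process's virtual address space at the detected limit.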
        resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
    if not mem_limit:
        # get the raw host memory
        mem_limit = virtual_memory().total
    return mem_limit / DIVISORS.get(divisor, 1)


================================================
FILE: codex/librarian/mp_queue.py
================================================
"""Library Queue."""

# This file cannot be named queue or it causes weird type checker errors
from multiprocessing import Queue

LIBRARIAN_QUEUE = Queue()


================================================
FILE: codex/librarian/notifier/__init__.py
================================================
"""Notifier Thread."""


================================================
FILE: codex/librarian/notifier/notifierd.py
================================================
"""Sends notifications to connections, reading from a queue."""

from typing import override

from codex.librarian.threads import AggregateMessageQueuedThread


class NotifierThread(AggregateMessageQueuedThread):
    """Aggregates messages preventing floods and sends messages to clients."""

    def __init__(self, *args, broadcast_queue, **kwargs) -> None:
        """Initialize local send url."""
        self.broadcast_queue = broadcast_queue
        super().__init__(*args, **kwargs)

    @override
    def aggregate_items(self, item) -> None:
        """Aggregate messages into cache."""
        self.cache[item.text] = item

    def _send_task(self, task) -> None:
        """
        Send a group_send message to the multiprocess broadcast channel.

        A random consumer awaiting the broadcast channel will consume it, and
        do a group_send with its message.
        """
        item = {
            "group": task.group,
            "message": {
                "type": "send_text",
                "text": task.text,
            },
        }
        self.broadcast_queue.put(item)

    @override
    def send_all_items(self) -> None:
        """Send all messages waiting in the message cache to client."""
        if not self.cache:
            return
        sent_keys = set()
        for task in self.cache.values():
            try:
                self._send_task(task)
            except Exception:
                self.log.exception("Notifier send task")
            sent_keys.add(task.text)
        self.cleanup_cache(sent_keys)

    @override
    def stop(self) -> None:
        """Send the consumer stop broadcast and stop the thread."""
        self.broadcast_queue.put(None)
        self.broadcast_queue.close()
        self.broadcast_queue.join_thread()
        super().stop()


================================================
FILE: codex/librarian/notifier/tasks.py
================================================
"""Notifier Tasks."""

from dataclasses import dataclass

from codex.choices.notifications import Notifications
from codex.librarian.tasks import LibrarianTask
from codex.websockets.consumers import ChannelGroups


@dataclass
class NotifierTask(LibrarianTask):
    """Task handled by the Notifier."""

    text: str
    group: str


ADMIN_FLAGS_CHANGED_TASK = NotifierTask(
    Notifications.ADMIN_FLAGS.value, ChannelGroups.ALL.name
)
COVERS_CHANGED_TASK = NotifierTask(Notifications.COVERS.value, ChannelGroups.ALL.name)
FAILED_IMPORTS_CHANGED_TASK = NotifierTask(
    Notifications.FAILED_IMPORTS.value, ChannelGroups.ADMIN.name
)
GROUPS_CHANGED_TASK = NotifierTask(Notifications.GROUPS.value, ChannelGroups.ALL.name)
LIBRARIAN_STATUS_TASK = NotifierTask(
    Notifications.LIBRARIAN_STATUS.value, ChannelGroups.ADMIN.name
)
LIBRARY_CHANGED_TASK = NotifierTask(Notifications.LIBRARY.value, ChannelGroups.ALL.name)
USERS_CHANGED_TASK = NotifierTask(Notifications.USERS.value, ChannelGroups.ALL.name)
ADMIN_USERS_CHANGED_TASK = NotifierTask(
    Notifications.USERS.value, ChannelGroups.ADMIN.name
)


================================================
FILE: codex/librarian/restarter/__init__.py
================================================
"""Codex restarter.""" ================================================ FILE: codex/librarian/restarter/restarter.py ================================================ """Update the codex python package.""" import os import signal from codex.librarian.restarter.status import ( CodexRestarterRestartStatus, CodexRestarterStatus, CodexRestarterStopStatus, ) from codex.librarian.restarter.tasks import ( CodexRestartTask, CodexShutdownTask, ) from codex.librarian.tasks import LibrarianShutdownTask from codex.librarian.worker import WorkerStatusBase class CodexRestarter(WorkerStatusBase): """Codex restarter.""" def _shutdown_codex( self, status: CodexRestarterStatus, name: str, sig: signal.Signals ) -> None: """Send a system signal as handled in run.py.""" try: self.status_controller.start(status) self.log.info(f"Sending {name} signal.") main_pid = os.getppid() os.kill(main_pid, sig) finally: self.status_controller.finish(status) # Librarian shutdown must come after the kill signal. self.librarian_queue.put(LibrarianShutdownTask()) def shutdown_codex(self) -> None: """Shutdown codex.""" self._shutdown_codex(CodexRestarterStopStatus(), "stop", signal.SIGTERM) def restart_codex(self) -> None: """Restart codex.""" self._shutdown_codex(CodexRestarterRestartStatus(), "restart", signal.SIGUSR1) def handle_task(self, task) -> None: """Handle Codex reatarter tasks.""" match task: case CodexRestartTask(): self.restart_codex() case CodexShutdownTask(): self.shutdown_codex() case _: self.log.warning(f"Unknown Codex RestarterTask: f{task}") ================================================ FILE: codex/librarian/restarter/status.py ================================================ """Restarter Statii.""" from abc import ABC from codex.librarian.status import Status class CodexRestarterStatus(Status, ABC): """Codex Restarter Statii.""" class CodexRestarterRestartStatus(CodexRestarterStatus): """Codex Restarter Restart Status.""" CODE = "RCR" VERB = "Restart" _verbed = "Restarted" ITEM_NAME = "Codex server" SINGLE = True class CodexRestarterStopStatus(CodexRestarterStatus): """Codex Restarter Restart Status.""" CODE = "RCS" VERB = "Stop" _verbed = "Stopped" ITEM_NAME = "Codex server" SINGLE = True RESTARTER_STATII = (CodexRestarterRestartStatus, CodexRestarterStopStatus) ================================================ FILE: codex/librarian/restarter/tasks.py ================================================ """Codex Restarter Taskss.""" from codex.librarian.tasks import LibrarianTask class CodexRestarterTask(LibrarianTask): """Restart and Shutdown.""" class CodexRestartTask(CodexRestarterTask): """Restart Codex.""" class CodexShutdownTask(CodexRestarterTask): """Shutdown Codex.""" ================================================ FILE: codex/librarian/scribe/__init__.py ================================================ """Scribe module for bulk writes to sqlite.""" ================================================ FILE: codex/librarian/scribe/importer/__init__.py ================================================ """Comic Importer.""" ================================================ FILE: codex/librarian/scribe/importer/const.py ================================================ """BULK_CREATE_COMIC_FIELDSConsts and maps for import.""" from types import MappingProxyType, SimpleNamespace from bidict import frozenbidict from django.db.models.fields import Field from django.db.models.fields.related import ForeignObjectRel, ManyToManyField from codex.models.base import BaseModel from codex.models.comic import Comic from 
codex.models.groups import ( BrowserGroupModel, Folder, Imprint, Publisher, Series, Volume, ) from codex.models.identifier import Identifier, IdentifierSource from codex.models.named import ( AgeRating, Character, Country, Credit, CreditPerson, CreditRole, Genre, Language, Location, OriginalFormat, ScanInfo, SeriesGroup, Story, StoryArc, StoryArcNumber, Tag, Tagger, Team, Universe, ) from codex.models.paths import CustomCover ############### # FIELD NAMES # ############### FOLDERS_FIELD_NAME = "folders" PUBLISHER_FIELD_NAME = "publisher" IMPRINT_FIELD_NAME = "imprint" VOLUME_FIELD_NAME = "volume" SERIES_FIELD_NAME = "series" PARENT_FOLDER_FIELD_NAME = "parent_folder" VOLUME_COUNT_FIELD_NAME = "volume_count" ISSUE_COUNT_FIELD_NAME = "issue_count" PATH_FIELD_NAME = "path" IDENTIFIERS_FIELD_NAME = "identifiers" NON_FTS_FIELDS = frozenset( { # Attributes "critical_rating", "day", "file_type", "issue_number", "issue_suffix", "metadata_mtime", "monochrome", "month", "notes", "page_count", "path", "reading_direction", "year", # FKs PARENT_FOLDER_FIELD_NAME, VOLUME_FIELD_NAME, # M2Ms FOLDERS_FIELD_NAME, IDENTIFIERS_FIELD_NAME, } ) ########################## # IMPORTER METADATA KEYS # ########################## EXTRACTED = "extracted" SKIPPED = "skipped" QUERY_MODELS = "query_models" CREATE_COMICS = "create_comics" CREATE_FKS = "create_fks" CREATE_COVERS = "create_covers" UPDATE_COMICS = "update_comics" UPDATE_FKS = "update_fks" UPDATE_COVERS = "update_covers" LINK_COVER_PKS = "link_cover_pks" LINK_FKS = "link_fks" LINK_M2MS = "link_m2ms" DELETE_M2MS = "delete_m2ms" FIS = "fis" TOTAL = "total" FK_KEYS = SimpleNamespace(CREATE_FKS=CREATE_FKS, UPDATE_FKS=UPDATE_FKS) FTS_UPDATE = "fts_update" FTS_CREATE = "fts_create" FTS_EXISTING_M2MS = "fts_existing_m2ms" FTS_CREATED_M2MS = "fts_created_m2ms" FTS_UPDATED_M2MS = "fts_updated_m2ms" ####### # M2M # ####### GROUP_MODEL_COUNT_FIELDS: MappingProxyType[type[BrowserGroupModel], str | None] = ( MappingProxyType( { Publisher: None, Imprint: None, Series: VOLUME_COUNT_FIELD_NAME, Volume: ISSUE_COUNT_FIELD_NAME, } ) ) COMIC_M2M_FIELDS: tuple[ManyToManyField, ...] = tuple( # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] field for field in Comic._meta.get_fields() if (field.many_to_many and not field.auto_created) ) COMIC_M2M_FIELD_NAMES: tuple[str, ...] = tuple(field.name for field in COMIC_M2M_FIELDS) COMPLEX_M2M_MODELS = (Credit, Identifier, StoryArcNumber) ######################## # COMPLEX M2M METADATA # ######################## DictModelType = Credit | Identifier | StoryArcNumber CREDITS_FIELD_NAME = "credits" CREDIT_PERSON_FIELD_NAME = "person" CREDIT_ROLE_FIELD_NAME = "role" STORY_ARC_NUMBERS_FIELD_NAME = "story_arc_numbers" STORY_ARC_FIELD_NAME = "story_arc" NUMBER_FIELD_NAME = "number" IDENTIFIER_SOURCE_FIELD_NAME = "source" IDENTIFIER_TYPE_FIELD_NAME = "id_type" IDENTIFIER_ID_KEY_FIELD_NAME = "key" IDENTIFIER_URL_FIELD_NAME = "url" UNIVERSES_FIELD_NAME = "universes" NAME_FIELD_NAME = "name" NUMBER_TO_FIELD_NAME = "number_to" IDENTIFIER_FIELD_NAME = "identifier" DESIGNATION_FIELD_NAME = "designation" ######################## # QUERY AND CREATE FKS # ######################## COMIC_FK_FIELDS: tuple[Field | ForeignObjectRel, ...] 
= tuple( field for field in Comic._meta.get_fields() if field and field.many_to_one and field.name != "library" and field.related_model and not issubclass(field.related_model, BrowserGroupModel) ) GROUP_FIELD_NAMES = ("publisher", "imprint", "series", "volume") GROUP_FIELD_NAMES_SET = frozenset(GROUP_FIELD_NAMES) _COMIC_GROUP_FIELDS: tuple[Field, ...] = tuple( Comic._meta.get_field(field_name) for field_name in GROUP_FIELD_NAMES ) ALL_COMIC_FK_FIELDS = (*_COMIC_GROUP_FIELDS, *COMIC_FK_FIELDS) COMIC_FK_FIELD_NAMES: tuple[str, ...] = tuple( field.name for field in ALL_COMIC_FK_FIELDS ) PROTAGONIST_FIELD_MODEL_MAP = MappingProxyType( {"main_character": Character, "main_team": Team} ) _DEFAULT_KEY_INDEX = 1 _IDENTIFIER_RELS = ( "identifier__source__name", "identifier__id_type", "identifier__key", ) _NAMED_MODEL_RELS = ((NAME_FIELD_NAME,), "") NAMED_MODELS = frozenset( { AgeRating, Country, Language, OriginalFormat, Tagger, ScanInfo, SeriesGroup, IdentifierSource, } ) _IDENTIFIED_MODEL_RELS = ((NAME_FIELD_NAME,), _IDENTIFIER_RELS) IDENTIFIED_MODELS = frozenset( {Character, CreditPerson, CreditRole, Genre, Location, Story, StoryArc, Tag, Team} ) MODEL_REL_MAP: MappingProxyType[type[BaseModel], tuple] = MappingProxyType( { **dict.fromkeys(NAMED_MODELS, _NAMED_MODEL_RELS), **dict.fromkeys(IDENTIFIED_MODELS, _IDENTIFIED_MODEL_RELS), Identifier: ( ( f"{IDENTIFIER_SOURCE_FIELD_NAME}__name", IDENTIFIER_TYPE_FIELD_NAME, IDENTIFIER_ID_KEY_FIELD_NAME, ), "", IDENTIFIER_URL_FIELD_NAME, ), Publisher: _IDENTIFIED_MODEL_RELS, Imprint: ( ( "publisher__name", NAME_FIELD_NAME, ), _IDENTIFIER_RELS, ), Series: ( ( "publisher__name", "imprint__name", NAME_FIELD_NAME, ), _IDENTIFIER_RELS, VOLUME_COUNT_FIELD_NAME, ), Volume: ( ( "publisher__name", "imprint__name", "series__name", NAME_FIELD_NAME, NUMBER_TO_FIELD_NAME, ), "", ISSUE_COUNT_FIELD_NAME, ), Folder: ( (PATH_FIELD_NAME,), "", ), Credit: ( ( f"{CREDIT_PERSON_FIELD_NAME}__name", f"{CREDIT_ROLE_FIELD_NAME}__name", ), "", ), StoryArcNumber: ( (f"{STORY_ARC_FIELD_NAME}__name", NUMBER_FIELD_NAME), "", ), Universe: ((NAME_FIELD_NAME,), _IDENTIFIER_RELS, DESIGNATION_FIELD_NAME), } ) _IDENTIFIED_SELECT_RELATED = ("identifier", "identifier__source") MODEL_SELECT_RELATED: MappingProxyType[type[BaseModel], tuple[str, ...]] = ( MappingProxyType( { **dict.fromkeys(IDENTIFIED_MODELS, _IDENTIFIED_SELECT_RELATED), Identifier: (IDENTIFIER_SOURCE_FIELD_NAME,), Publisher: _IDENTIFIED_SELECT_RELATED, Imprint: ("publisher",), Series: ("publisher", "imprint"), Volume: ("publisher", "imprint", "series"), Credit: (CREDIT_PERSON_FIELD_NAME, CREDIT_ROLE_FIELD_NAME), StoryArcNumber: (STORY_ARC_FIELD_NAME,), Universe: _IDENTIFIED_SELECT_RELATED, } ) ) FIELD_NAME_KEYS_REL_MAP = MappingProxyType( { field.name: MODEL_REL_MAP[field.related_model][0] # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type] for field in (*ALL_COMIC_FK_FIELDS, *COMIC_M2M_FIELDS) } ) _NAMED_MODEL_ATTRS = ("name",) _IDENTIFIER_KEY_ATTRS = ( IDENTIFIER_SOURCE_FIELD_NAME, IDENTIFIER_TYPE_FIELD_NAME, IDENTIFIER_ID_KEY_FIELD_NAME, ) FIELD_NAME_KEY_ATTRS_MAP = MappingProxyType( { **dict.fromkeys(COMIC_M2M_FIELD_NAMES, _NAMED_MODEL_ATTRS), FOLDERS_FIELD_NAME: ("path",), IDENTIFIER_FIELD_NAME: _IDENTIFIER_KEY_ATTRS, IDENTIFIERS_FIELD_NAME: _IDENTIFIER_KEY_ATTRS, PUBLISHER_FIELD_NAME: _NAMED_MODEL_ATTRS, IMPRINT_FIELD_NAME: (PUBLISHER_FIELD_NAME, *_NAMED_MODEL_ATTRS), SERIES_FIELD_NAME: ( PUBLISHER_FIELD_NAME, IMPRINT_FIELD_NAME, *_NAMED_MODEL_ATTRS, ), VOLUME_FIELD_NAME: ( PUBLISHER_FIELD_NAME, 
            IMPRINT_FIELD_NAME,
            SERIES_FIELD_NAME,
            *_NAMED_MODEL_ATTRS,
        ),
        CREDITS_FIELD_NAME: (CREDIT_PERSON_FIELD_NAME, CREDIT_ROLE_FIELD_NAME),
        STORY_ARC_NUMBERS_FIELD_NAME: (STORY_ARC_FIELD_NAME, NUMBER_FIELD_NAME),
    }
)


def get_key_index(model: type[BaseModel]) -> int:
    """Return the key index divider for a model tuple."""
    return len(MODEL_REL_MAP[model][0])


#################
# CREATE COMICS #
#################
_EXCLUDE_BULK_UPDATE_COMIC_FIELDS = {
    "bookmark",
    "created_at",
    "id",
    "library",
    "comicfts",
}
BULK_UPDATE_COMIC_FIELDS = tuple(
    sorted(
        field.name
        for field in Comic._meta.get_fields()
        # Concrete check protects against SettingsReader reverse relations being updated
        if field.concrete  # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute]
        and (not field.many_to_many)
        and (field.name not in _EXCLUDE_BULK_UPDATE_COMIC_FIELDS)
    )
)
BULK_CREATE_COMIC_FIELDS = (*BULK_UPDATE_COMIC_FIELDS, "library")
BULK_UPDATE_FOLDER_FIELDS = (
    "name",
    "parent_folder",
    "path",
    "sort_name",
    "stat",
    "updated_at",
)
BULK_UPDATE_FOLDER_MODIFIED_FIELDS = ("stat", "updated_at")

##########
# COVERS #
##########
CLASS_CUSTOM_COVER_GROUP_MAP = frozenbidict(
    {
        Publisher: CustomCover.GroupChoices.P.value,
        Imprint: CustomCover.GroupChoices.I.value,
        Series: CustomCover.GroupChoices.S.value,
        StoryArc: CustomCover.GroupChoices.A.value,
        Folder: CustomCover.GroupChoices.F.value,
    }
)

#########
# MOVED #
#########
MOVED_BULK_COMIC_UPDATE_FIELDS = ("path", "parent_folder", "stat", "updated_at")
CUSTOM_COVER_UPDATE_FIELDS = ("path", "stat", "updated_at", "sort_name", "group")

###########
# DELETED #
###########
ALL_COMIC_GROUP_FIELD_NAMES = (
    *GROUP_FIELD_NAMES,
    "story_arc_numbers",
    "folders",
)

##########
# Failed #
##########
UPDATE_FIS = "update_fis"
CREATE_FIS = "create_fis"
DELETE_FI_PATHS = "delete_fi_paths"
BULK_UPDATE_FAILED_IMPORT_FIELDS = ("name", "stat", "updated_at")


================================================
FILE: codex/librarian/scribe/importer/create/__init__.py
================================================
"""
Create all missing comic foreign keys for an import.

So we may safely create the comics next.
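
Creation runs in phases: foreign keys, then custom covers, then comics,
checking the abort event between phases.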
""" from codex.librarian.scribe.importer.create.foreign_keys import ( CreateForeignKeysCreateUpdateImporter, ) class CreateForeignKeysImporter(CreateForeignKeysCreateUpdateImporter): """Methods for creating foreign keys.""" def create_and_update(self) -> None: """Create and update FKs, covers and comics.""" if self.abort_event.is_set(): return fk_count = self.create_all_fks() if self.abort_event.is_set(): return fk_count += self.update_all_fks() self.counts.tags = fk_count if self.abort_event.is_set(): return cover_count = self.create_custom_covers() if self.abort_event.is_set(): return cover_count += self.update_custom_covers() self.counts.covers = cover_count if self.abort_event.is_set(): return comic_count = self.update_comics() comic_count += self.create_comics() self.counts.comic = comic_count ================================================ FILE: codex/librarian/scribe/importer/create/comics.py ================================================ """Bulk update and create comic objects and bulk update m2m fields.""" from django.db.models import NOT_PROVIDED from django.db.models.functions import Now from codex.librarian.scribe.importer.const import ( BULK_CREATE_COMIC_FIELDS, BULK_UPDATE_COMIC_FIELDS, CREATE_COMICS, FTS_CREATE, FTS_UPDATE, LINK_FKS, NON_FTS_FIELDS, PATH_FIELD_NAME, UPDATE_COMICS, ) from codex.librarian.scribe.importer.create.link_fks import ( CreateForeignKeyLinksImporter, ) from codex.librarian.scribe.importer.statii.create import ( ImporterCreateComicsStatus, ImporterUpdateComicsStatus, ) from codex.models import Comic from codex.settings import IMPORTER_UPDATE_COMIC_BATCH_SIZE class CreateComicsImporter(CreateForeignKeyLinksImporter): """Create comics methods.""" def _populate_fts_attribute_values(self, key: str, sub_key: str | int, md) -> None: if sub_key not in self.metadata[key]: self.metadata[key][sub_key] = {} for field_name, value in md.items(): if field_name not in NON_FTS_FIELDS: self.metadata[key][sub_key][field_name] = (value,) def _update_comic_values( self, comic: Comic, update_comics: list, comic_pks: list ) -> None: md = self.metadata[UPDATE_COMICS].pop(comic.pk, {}) for field_name, value in md.items(): setattr(comic, field_name, value) self._populate_fts_attribute_values(FTS_UPDATE, comic.pk, md) link_md = self.get_comic_fk_links(comic.pk, comic.path) for field_name, value in link_md.items(): set_value = value if set_value is None: default_value = Comic._meta.get_field(field_name).default if default_value != NOT_PROVIDED: set_value = default_value setattr(comic, field_name, set_value) comic.presave() comic.updated_at = Now() update_comics.append(comic) comic_pks.append(comic.pk) def update_comics(self) -> int: """Bulk update comics, and move nonextant comics into create job..""" count = 0 pks = tuple(sorted(self.metadata[UPDATE_COMICS].keys())) num_comics = len(pks) status = ImporterUpdateComicsStatus(None, num_comics) try: if not num_comics: self.metadata.pop(UPDATE_COMICS) self.status_controller.finish(status) return count self.log.debug( f"Preparing {num_comics} comics for update in library {self.library.path}." 
) self.status_controller.start(status) self.metadata[FTS_UPDATE] = {} # Get existing comics to update comics = Comic.objects.filter(library=self.library, pk__in=pks).only( PATH_FIELD_NAME, *BULK_UPDATE_COMIC_FIELDS ) # set attributes for each comic update_comics = [] comic_pks = [] for comic in comics: if self.abort_event.is_set(): return count try: self._update_comic_values(comic, update_comics, comic_pks) except Exception: self.log.exception(f"Error preparing {comic} for update.") self.metadata.pop(UPDATE_COMICS) count = len(update_comics) if count: self.log.debug(f"Bulk updating {len(update_comics)} comics.") Comic.objects.bulk_update( update_comics, BULK_UPDATE_COMIC_FIELDS, batch_size=IMPORTER_UPDATE_COMIC_BATCH_SIZE, ) if comic_pks: self.log.debug( f"Purging covers for {len(comic_pks)} updated comics." ) self.remove_covers(comic_pks, custom=False) except Exception: self.log.exception(f"While updating comics: {pks}") finally: self.status_controller.finish(status) return count def _bulk_create_comic(self, path: str, create_comics: list[Comic]) -> None: md = self.metadata[CREATE_COMICS].pop(path, {}) self._populate_fts_attribute_values(FTS_CREATE, path, md) link_md = self.get_comic_fk_links(path, path) md.update(link_md) if not md: return comic = Comic(**md, library=self.library) comic.presave() create_comics.append(comic) def create_comics(self) -> int: """Bulk create comics.""" count = 0 paths = tuple(sorted(self.metadata[CREATE_COMICS].keys())) num_comics = len(paths) status = ImporterCreateComicsStatus(None, num_comics) try: if not num_comics: self.metadata.pop(CREATE_COMICS) self.metadata.pop(LINK_FKS) self.status_controller.finish(status) return count self.log.debug( f"Preparing {num_comics} comics for creation in library {self.library.path}." 
            self.status_controller.start(status)
            self.metadata[FTS_CREATE] = {}
            create_comics = []
            for path in paths:
                if self.abort_event.is_set():
                    return count
                try:
                    self._bulk_create_comic(path, create_comics)
                except KeyError:
                    self.log.warning(f"No comic metadata for {path}")
                    self.log.exception(f"Error preparing {path} for create.")
                except Exception:
                    self.log.exception(f"Error preparing {path} for create.")
            self.metadata.pop(CREATE_COMICS)
            self.metadata.pop(LINK_FKS)
            num_comics = len(create_comics)
            if num_comics:
                self.log.debug(f"Bulk creating {num_comics} comics...")
                created_comics = Comic.objects.bulk_create(
                    create_comics,
                    update_conflicts=True,
                    update_fields=BULK_CREATE_COMIC_FIELDS,
                    unique_fields=Comic._meta.unique_together[0],
                )
                count = len(created_comics)
                # Replace FTS_CREATE path keyed entries with pk keys
                for created_comic in created_comics:
                    self.metadata[FTS_CREATE][created_comic.pk] = self.metadata[
                        FTS_CREATE
                    ].pop(created_comic.path)
        except Exception:
            self.log.exception(f"While creating {num_comics} comics")
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/create/const.py
================================================
"""Create fks consts."""

from collections.abc import Mapping
from types import MappingProxyType

from codex.librarian.scribe.importer.const import (
    CREDIT_PERSON_FIELD_NAME,
    CREDIT_ROLE_FIELD_NAME,
    DESIGNATION_FIELD_NAME,
    IDENTIFIED_MODELS,
    IDENTIFIER_FIELD_NAME,
    IDENTIFIER_ID_KEY_FIELD_NAME,
    IDENTIFIER_SOURCE_FIELD_NAME,
    IDENTIFIER_TYPE_FIELD_NAME,
    IDENTIFIER_URL_FIELD_NAME,
    IMPRINT_FIELD_NAME,
    ISSUE_COUNT_FIELD_NAME,
    NAME_FIELD_NAME,
    NAMED_MODELS,
    NUMBER_FIELD_NAME,
    NUMBER_TO_FIELD_NAME,
    PUBLISHER_FIELD_NAME,
    SERIES_FIELD_NAME,
    STORY_ARC_FIELD_NAME,
    VOLUME_COUNT_FIELD_NAME,
)
from codex.models import (
    Credit,
    StoryArc,
    StoryArcNumber,
    Volume,
)
from codex.models.base import BaseModel
from codex.models.comic import Comic
from codex.models.groups import Folder, Imprint, Publisher, Series
from codex.models.identifier import (
    Identifier,
    IdentifierSource,
)
from codex.models.named import (
    CreditPerson,
    CreditRole,
    Universe,
)

ORDERED_CREATE_MODELS = (
    IdentifierSource,
    Identifier,
    Publisher,
    Imprint,
    Series,
    Volume,
)
CUSTOM_COVER_MODELS = frozenset({Publisher, Imprint, Series, Volume, Folder, StoryArc})
GROUP_BASE_MODELS = frozenset({Publisher, Imprint, Series, StoryArc})
GROUP_BASE_FIELDS = ("name", "sort_name")
DEFAULT_NON_NULL_CHARFIELD_NAMES = frozenset({"name"})
NON_NULL_CHARFIELD_NAMES: MappingProxyType[type[BaseModel], frozenset[str]] = (
    MappingProxyType(
        {
            Universe: frozenset({"designation"}),
            Identifier: frozenset({"id_type", "key", "url"}),
            Comic: frozenset({"path"}),
            Volume: frozenset(),
        }
    )
)
_NAMED_CREATE_ARGS = ({NAME_FIELD_NAME: None}, {})
_IDENTIFIED_CREATE_ARGS = ({NAME_FIELD_NAME: None}, {IDENTIFIER_FIELD_NAME: Identifier})
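# Maps each model to (key args, update args): the first mapping identifies an
# existing row for conflict resolution, the second holds fields that may be
# overwritten on update. A model-class value marks an arg that is itself a row
# resolved by that model's own key rels.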
MODEL_CREATE_ARGS_MAP: MappingProxyType[
    type[BaseModel], tuple[Mapping[str, type[BaseModel] | None], ...]
] = MappingProxyType(
    {
        **dict.fromkeys(NAMED_MODELS, _NAMED_CREATE_ARGS),
        Identifier: (
            {
                IDENTIFIER_SOURCE_FIELD_NAME: IdentifierSource,
                IDENTIFIER_TYPE_FIELD_NAME: None,
                IDENTIFIER_ID_KEY_FIELD_NAME: None,
            },
            {
                IDENTIFIER_URL_FIELD_NAME: None,
            },
        ),
        Publisher: _IDENTIFIED_CREATE_ARGS,
        Imprint: (
            {
                PUBLISHER_FIELD_NAME: Publisher,
                NAME_FIELD_NAME: None,
            },
            {IDENTIFIER_FIELD_NAME: Identifier},
        ),
        Series: (
            {
                PUBLISHER_FIELD_NAME: Publisher,
                IMPRINT_FIELD_NAME: Imprint,
                NAME_FIELD_NAME: None,
            },
            {IDENTIFIER_FIELD_NAME: Identifier, VOLUME_COUNT_FIELD_NAME: None},
        ),
        Volume: (
            {
                PUBLISHER_FIELD_NAME: Publisher,
                IMPRINT_FIELD_NAME: Imprint,
                SERIES_FIELD_NAME: Series,
                NAME_FIELD_NAME: None,
                NUMBER_TO_FIELD_NAME: None,
            },
            {ISSUE_COUNT_FIELD_NAME: None},
        ),
        **dict.fromkeys(IDENTIFIED_MODELS, _IDENTIFIED_CREATE_ARGS),
        Credit: (
            {
                CREDIT_PERSON_FIELD_NAME: CreditPerson,
                CREDIT_ROLE_FIELD_NAME: CreditRole,
            },
            {},
        ),
        StoryArcNumber: ({STORY_ARC_FIELD_NAME: StoryArc, NUMBER_FIELD_NAME: None}, {}),
        Universe: (
            {
                NAME_FIELD_NAME: None,
            },
            {
                IDENTIFIER_FIELD_NAME: Identifier,
                DESIGNATION_FIELD_NAME: None,
            },
        ),
    }
)


================================================
FILE: codex/librarian/scribe/importer/create/covers.py
================================================
"""Create Custom Covers."""

from django.core.exceptions import ObjectDoesNotExist
from django.db.models.functions.datetime import Now

from codex.librarian.scribe.importer.const import (
    CLASS_CUSTOM_COVER_GROUP_MAP,
    CREATE_COVERS,
    CUSTOM_COVER_UPDATE_FIELDS,
    LINK_COVER_PKS,
    UPDATE_COVERS,
)
from codex.librarian.scribe.importer.create.comics import CreateComicsImporter
from codex.librarian.scribe.importer.statii.create import (
    ImporterCreateCoversStatus,
    ImporterUpdateCoversStatus,
)
from codex.librarian.scribe.importer.statii.link import ImporterLinkCoversStatus
from codex.models import CustomCover


class CreateCoversImporter(CreateComicsImporter):
    """Create Custom Covers."""

    @staticmethod
    def add_custom_cover_to_group(group_class, obj) -> None:
        """If a custom cover exists for this group, add it."""
        group = CLASS_CUSTOM_COVER_GROUP_MAP.get(group_class)
        if not group:
            # Normal, volume doesn't link to covers
            return
        try:
            cover = CustomCover.objects.filter(group=group, sort_name=obj.sort_name)[0]
            obj.custom_cover = cover
        except (IndexError, ObjectDoesNotExist):
            pass

    def update_custom_covers(self) -> int:
        """Update Custom Covers."""
        count = 0
        status = ImporterUpdateCoversStatus(0)
        try:
            update_covers_qs = self.metadata.pop(UPDATE_COVERS, None)
            if not update_covers_qs:
                self.status_controller.finish(status)
                return count
            update_covers_count = update_covers_qs.count()
            if not update_covers_count:
                self.status_controller.finish(status)
                return count
            status.total = update_covers_count
            self.status_controller.start(status)
            update_covers = []
            for cover in update_covers_qs.only(*CUSTOM_COVER_UPDATE_FIELDS):
                cover.updated_at = Now()
                cover.presave()
                update_covers.append(cover)
            if update_covers:
                CustomCover.objects.bulk_update(
                    update_covers, CUSTOM_COVER_UPDATE_FIELDS
                )
                update_cover_pks = update_covers_qs.values_list("pk", flat=True)
                if LINK_COVER_PKS not in self.metadata:
                    self.metadata[LINK_COVER_PKS] = set()
                self.metadata[LINK_COVER_PKS].update(update_cover_pks)
                self.remove_covers(update_cover_pks, custom=True)
                count = len(update_covers)
                if status:
                    status.increment_complete(count)
                link_covers_status = ImporterLinkCoversStatus(
                    0,
                    len(self.metadata[LINK_COVER_PKS]),
                )
                self.status_controller.update(link_covers_status, notify=False)
        finally:
            self.status_controller.finish(status)
        return count

    def create_custom_covers(self) -> int:
        """Create Custom Covers."""
        count = 0
        create_cover_paths = self.metadata.pop(CREATE_COVERS, ())
        num_create_cover_paths = len(create_cover_paths)
        status = ImporterCreateCoversStatus(0, num_create_cover_paths)
        try:
            if not num_create_cover_paths:
                self.status_controller.finish(status)
                return count
            self.status_controller.start(status)
            create_covers = []
            for path in create_cover_paths:
                cover = CustomCover(library=self.library, path=path)
                cover.presave()
                create_covers.append(cover)
            if create_covers:
                objs = CustomCover.objects.bulk_create(
                    create_covers,
                    update_conflicts=True,
                    update_fields=("path", "stat"),
                    unique_fields=CustomCover._meta.unique_together[0],
                )
                created_pks = frozenset(obj.pk for obj in objs)
                if LINK_COVER_PKS not in self.metadata:
                    self.metadata[LINK_COVER_PKS] = set()
                self.metadata[LINK_COVER_PKS].update(created_pks)
                count = len(created_pks)
                if status:
                    status.increment_complete(count)
                link_covers_status = ImporterLinkCoversStatus(
                    0, len(self.metadata[LINK_COVER_PKS])
                )
                self.status_controller.update(link_covers_status, notify=False)
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/create/folders.py
================================================
"""Create missing folder foreign keys for an import."""

from pathlib import Path

from codex.librarian.scribe.importer.const import BULK_UPDATE_FOLDER_FIELDS
from codex.librarian.scribe.importer.create.covers import CreateCoversImporter
from codex.librarian.status import Status
from codex.models import Folder


class CreateForeignKeysFolderImporter(CreateCoversImporter):
    """Methods for creating foreign keys."""

    def _get_parent_folder(self, path: Path):
        parent = None
        if str(path) == self.library.path:
            return parent
        parent_path = str(path.parent)
        try:
            parent = Folder.objects.get(path=parent_path)
        except Folder.DoesNotExist:
            if path.parent != Path(self.library.path):
                reason = (
                    f"Can't find parent folder {parent_path} for {path} in library"
                    f" {self.library.path}"
                )
                self.log.warning(reason)
        return parent

    def _bulk_folders_create_add_folder(self, path: Path, create_folders) -> None:
        """Add one folder to the create list."""
        parent_folder = self._get_parent_folder(path)
        folder = Folder(
            library=self.library,
            path=str(path),
            name=path.name,
            parent_folder=parent_folder,
        )
        folder.presave()
        self.add_custom_cover_to_group(Folder, folder)
        create_folders.append(folder)

    def _bulk_folders_create_depth_level(self, paths, status: Status) -> int:
        """Create a depth level of folders."""
        create_folders = []
        for path in sorted(paths):
            self._bulk_folders_create_add_folder(path, create_folders)
        Folder.objects.bulk_create(
            create_folders,
            update_conflicts=True,
            update_fields=BULK_UPDATE_FOLDER_FIELDS,
            unique_fields=Folder._meta.unique_together[0],
        )
        count = len(create_folders)
        status.increment_complete(count)
        self.status_controller.update(status)
        return count

    def bulk_folders_create(self, folder_paths: frozenset, status) -> int:
        """Create folders breadth first."""
        count = 0
        if not folder_paths:
            return count
        # group folder paths by depth
        folder_path_dict = {}
        for path_strs in folder_paths:
            for path_str in path_strs:
                path = Path(path_str)
                path_length = len(path.parts)
                if path_length not in folder_path_dict:
                    folder_path_dict[path_length] = set()
                folder_path_dict[path_length].add(path)
        # create each depth level first to ensure we can assign parents
        for depth_level in sorted(folder_path_dict):
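            # Parents for this level were created by the previous, shallower,
            # pass, so _get_parent_folder() can resolve them from the database.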
            paths = sorted(folder_path_dict[depth_level])
            level_count = self._bulk_folders_create_depth_level(paths, status)
            count += level_count
            self.log.debug(
                f"Created {level_count} Folders at depth level {depth_level}"
            )
            self.status_controller.update(status)
        return count

    def bulk_folders_update(self, folder_paths: frozenset, status) -> int:
        """Update existing folders."""
        # Is this really moved?
        count = 0
        if not folder_paths:
            return count
        folders = Folder.objects.filter(library=self.library, path__in=folder_paths)
        for folder in folders:
            folder.save()
        self.status_controller.update(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/create/foreign_keys.py
================================================
"""
Create all missing comic many to many objects for an import.

So we may safely create the comics next.
"""

from collections.abc import Mapping
from itertools import chain

from codex.librarian.scribe.importer.const import (
    COMPLEX_M2M_MODELS,
    CREATE_FKS,
    FTS_CREATED_M2MS,
    FTS_UPDATED_M2MS,
    GROUP_MODEL_COUNT_FIELDS,
    MODEL_REL_MAP,
    TOTAL,
    UPDATE_FKS,
)
from codex.librarian.scribe.importer.create.const import (
    CUSTOM_COVER_MODELS,
    DEFAULT_NON_NULL_CHARFIELD_NAMES,
    GROUP_BASE_FIELDS,
    GROUP_BASE_MODELS,
    MODEL_CREATE_ARGS_MAP,
    NON_NULL_CHARFIELD_NAMES,
    ORDERED_CREATE_MODELS,
)
from codex.librarian.scribe.importer.create.folders import (
    CreateForeignKeysFolderImporter,
)
from codex.librarian.scribe.importer.statii.create import (
    ImporterCreateTagsStatus,
    ImporterUpdateTagsStatus,
)
from codex.librarian.status import Status
from codex.models.base import BaseModel
from codex.models.groups import Folder
from codex.models.named import Universe


class CreateForeignKeysCreateUpdateImporter(CreateForeignKeysFolderImporter):
    """Methods for creating foreign keys."""

    @staticmethod
    def _get_create_update_args(
        model: type[BaseModel],
        key_args_map: Mapping,
        update_args_map: Mapping,
        values_tuple: tuple,
    ) -> tuple[dict, dict]:
        """Create key args and update args."""
        key_args = {}
        update_args = {}
        num_keys = len(key_args_map)
        for index, (field_name, field_model) in enumerate(
            chain(key_args_map.items(), update_args_map.items())
        ):
            value = values_tuple[index]
            if field_model and value is not None:
                key_rels = MODEL_REL_MAP[field_model][0]
                if field_model in GROUP_MODEL_COUNT_FIELDS and index < num_keys:
                    value = values_tuple[: index + 1]
                elif not isinstance(value, tuple):
                    value = (value,)
                sub_model_key_args = dict(zip(key_rels, value, strict=True))
                value = field_model.objects.get(**sub_model_key_args)
            non_null_charfield_names = NON_NULL_CHARFIELD_NAMES.get(
                model, DEFAULT_NON_NULL_CHARFIELD_NAMES
            )
            if value is None and field_name in non_null_charfield_names:
                value = ""
            arg_map = key_args if index < num_keys else update_args
            arg_map[field_name] = value
        return key_args, update_args

    def _add_custom_cover(self, model, obj) -> None:
        self.add_custom_cover_to_group(model, obj)

    def _finish_create_update(self, model, objs, status: Status) -> None:
        count = len(objs)
        if count:
            vnp = model._meta.verbose_name_plural
            title = vnp.title() if vnp else model._meta.verbose_name
            self.log.info(f"Created {count} {title}.")
            status.increment_complete(count)
            self.status_controller.update(status)

    def _bulk_create_models(
        self,
        model: type[BaseModel],
        status,
    ) -> int:
        """Bulk create a dict type m2m model."""
        count = 0
        create_tuples = self.metadata[CREATE_FKS].pop(model, None)
        if not create_tuples:
            return count
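        # Tuples are sorted with key=str for a deterministic insert order that
        # tolerates None members mixed into the key tuples.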
        status.subtitle = model._meta.verbose_name_plural
        self.status_controller.update(status)
        key_args_map, update_args_map = MODEL_CREATE_ARGS_MAP[model]
        create_objs = []
        create_tuples = sorted(create_tuples, key=str)
        created_fts_values = {}
        for values_tuple in create_tuples:
            key_args, update_args = self._get_create_update_args(
                model,
                key_args_map,
                update_args_map,
                values_tuple,  # ty: ignore[invalid-argument-type]
            )
            obj = model(**key_args, **update_args)
            obj.presave()
            if model in CUSTOM_COVER_MODELS:
                self._add_custom_cover(model, obj)
            create_objs.append(obj)
            if model == Universe and update_args:
                fts_update_values = tuple(
                    value for value in update_args.values() if isinstance(value, str)
                )
                created_fts_values[tuple(key_args.values())] = fts_update_values
        if created_fts_values:
            field_name = model.__name__.lower() + "s"
            self.metadata[FTS_CREATED_M2MS][field_name] = created_fts_values
        update_fields = tuple(key_args_map.keys()) + tuple(update_args_map.keys())
        if model in GROUP_BASE_MODELS:
            update_fields += GROUP_BASE_FIELDS
        count = len(create_objs)
        model.objects.bulk_create(
            create_objs,
            update_conflicts=True,
            update_fields=update_fields,
            unique_fields=model._meta.unique_together[0],
        )
        self._finish_create_update(model, create_objs, status)
        return count

    def bulk_create_all_models(self, status) -> int:
        """
        Bulk create all dict type m2m models.

        Done in three phases for dependency order
        """
        count = 0
        complex_models = []
        # These come first in this order
        for model in ORDERED_CREATE_MODELS:
            count += self._bulk_create_models(
                model,
                status,
            )
        for model in tuple(self.metadata[CREATE_FKS]):
            if model in COMPLEX_M2M_MODELS:
                # These must come last
                complex_models.append(model)
            else:
                count += self._bulk_create_models(
                    model,
                    status,
                )
        for model in complex_models:
            count += self._bulk_create_models(
                model,
                status,
            )
        return count

    def _bulk_update_models(self, model: type[BaseModel], status) -> int:
        count = 0
        update_tuples = self.metadata[UPDATE_FKS].pop(model, None)
        if not update_tuples:
            return count
        status.subtitle = model._meta.verbose_name_plural
        self.status_controller.update(status)
        key_args_map, update_args_map = MODEL_CREATE_ARGS_MAP[model]
        update_objs = []
        update_tuples = sorted(update_tuples, key=str)
        fts_update_pks = set()
        for values_tuple in update_tuples:
            key_args, update_args = self._get_create_update_args(
                model,
                key_args_map,
                update_args_map,
                values_tuple,  # ty: ignore[invalid-argument-type]
            )
            obj = model.objects.get(**key_args)
            for key, value in update_args.items():
                setattr(obj, key, value)
            obj.presave()
            if model in CUSTOM_COVER_MODELS:
                self._add_custom_cover(model, obj)
            update_objs.append(obj)
            # Generic code, but really it's only universe
            # if tuple(
            #     value for value in update_args.values() if isinstance(value, str)
            # ):
            if model == Universe:
                fts_update_pks.add(obj.pk)
        if fts_update_pks:
            field_name = model.__name__.lower() + "s"
            self.metadata[FTS_UPDATED_M2MS][field_name] = fts_update_pks
        update_fields = tuple(update_args_map.keys())
        if model in GROUP_BASE_MODELS:
            update_fields += GROUP_BASE_FIELDS
        count = len(update_objs)
        model.objects.bulk_update(update_objs, fields=update_fields)
        self._finish_create_update(model, update_objs, status)
        return count

    def bulk_update_all_models(self, status) -> int:
        """Bulk update all complex models."""
        count = 0
        for model in tuple(self.metadata[UPDATE_FKS].keys()):
            count += self._bulk_update_models(model, status)
        return count

    def create_all_fks(self) -> int:
        """Bulk create all foreign keys."""
        count = 0
        fkc = self.metadata.get(CREATE_FKS, {})
        create_status = ImporterCreateTagsStatus(0, fkc.pop(TOTAL, 0))
        try:
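            # Presumably initialized even when there is nothing to create so
            # later FTS passes can read the key unconditionally.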
            self.metadata[FTS_CREATED_M2MS] = {}
            if not fkc:
                return count
            self.status_controller.start(create_status)
            count += self.bulk_folders_create(
                fkc.pop(Folder, frozenset()), create_status
            )
            count += self.bulk_create_all_models(create_status)
        finally:
            self.metadata.pop(CREATE_FKS, None)
            self.status_controller.finish(create_status)
        return count

    def update_all_fks(self) -> int:
        """Bulk update all foreign keys."""
        count = 0
        fku = self.metadata.get(UPDATE_FKS, {})
        update_status = ImporterUpdateTagsStatus(0, fku.pop(TOTAL, 0))
        try:
            self.metadata[FTS_UPDATED_M2MS] = {}
            if not fku:
                return count
            self.status_controller.start(update_status)
            count += self.bulk_folders_update(
                fku.pop(Folder, frozenset()), update_status
            )
            count += self.bulk_update_all_models(update_status)
        finally:
            self.metadata.pop(UPDATE_FKS, None)
            self.status_controller.finish(update_status)
        return count


================================================
FILE: codex/librarian/scribe/importer/create/link_fks.py
================================================
"""Bulk update m2m fields foreign keys."""

from contextlib import suppress
from pathlib import Path
from typing import TYPE_CHECKING

from comicbox.schemas.comicbox import PROTAGONIST_KEY

from codex.librarian.scribe.importer.const import (
    LINK_FKS,
    PARENT_FOLDER_FIELD_NAME,
    PROTAGONIST_FIELD_MODEL_MAP,
    VOLUME_FIELD_NAME,
)
from codex.librarian.scribe.importer.link import LinkComicsImporter
from codex.librarian.scribe.importer.link.const import (
    DEFAULT_KEY_RELS,
    GROUP_KEY_RELS,
)
from codex.models import Folder
from codex.models.comic import Comic

if TYPE_CHECKING:
    from codex.models.base import BaseModel


class CreateForeignKeyLinksImporter(LinkComicsImporter):
    """Link comics methods."""

    def _get_comic_folder_fk_link(self, md, subkey: int | str, path: str) -> None:
        parent_path = str(Path(path).parent)
        md[PARENT_FOLDER_FIELD_NAME] = Folder.objects.get(path=parent_path)
        self.add_links_to_fts(
            subkey,
            PARENT_FOLDER_FIELD_NAME,
            (parent_path,),
        )

    def _get_comic_protagonist_fk_link(self, md, link_fks: dict[str, tuple]) -> None:
        """Protagonist does not create, only links."""
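        # PROTAGONIST_KEY carries a single name; it is matched against each
        # candidate model in PROTAGONIST_FIELD_MODEL_MAP and linked only when a
        # row already exists.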
Only links.""" if name := link_fks.pop(PROTAGONIST_KEY, None): name = name[0] for field_name, protagonist_model in PROTAGONIST_FIELD_MODEL_MAP.items(): value = None if name: with suppress(protagonist_model.DoesNotExist): value = protagonist_model.objects.get(name=name) md[field_name] = value def _get_comic_simple_fk_links( self, md, subkey: int | str, link_fks: dict[str, tuple] ) -> None: for field_name in tuple(link_fks.keys()): model: type[BaseModel] = Comic._meta.get_field(field_name).related_model # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]│ key_rels = GROUP_KEY_RELS.get(model, DEFAULT_KEY_RELS) values = link_fks.pop(field_name) filter_dict = dict(zip(key_rels, values, strict=True)) md[field_name] = model.objects.get(**filter_dict) if field_name != VOLUME_FIELD_NAME: self.add_links_to_fts( subkey, field_name, (values[-1],), ) def get_comic_fk_links(self, subkey: str | int, path: str) -> dict: """Get links for all foreign keys for creating and updating.""" md = {} self._get_comic_folder_fk_link(md, subkey, path) if link_fks := self.metadata[LINK_FKS].pop(path, {}): self._get_comic_protagonist_fk_link(md, link_fks) self._get_comic_simple_fk_links(md, subkey, link_fks) return md ================================================ FILE: codex/librarian/scribe/importer/delete/__init__.py ================================================ """Clean up the database after moves or imports.""" from codex.librarian.scribe.importer.delete.folders import DeletedFoldersImporter from codex.librarian.scribe.timestamp_update import TimestampUpdater class DeletedImporter(DeletedFoldersImporter): """Delete database objects methods.""" def delete(self) -> None: """Delete files and folders.""" if self.abort_event.is_set(): return self.counts.folders_deleted += self.bulk_folders_deleted() if self.abort_event.is_set(): return self.counts.comics_deleted, deleted_comic_groups = self.bulk_comics_deleted() if self.abort_event.is_set(): return self.counts.covers_deleted = self.bulk_covers_deleted() if self.abort_event.is_set(): return timestamp_updater = TimestampUpdater( self.log, self.librarian_queue, self.db_write_lock ) timestamp_updater.update_library_groups( self.library, self.start_time, deleted_comic_groups ) ================================================ FILE: codex/librarian/scribe/importer/delete/comics.py ================================================ """Delete comics methods.""" from codex.librarian.scribe.importer.const import ALL_COMIC_GROUP_FIELD_NAMES from codex.librarian.scribe.importer.delete.covers import DeletedCoversImporter from codex.librarian.scribe.importer.statii.delete import ImporterRemoveComicsStatus from codex.models import Comic, Folder, StoryArc from codex.settings import ( IMPORTER_DELETE_MAX_CHUNK_SIZE, IMPORTER_LINK_FK_BATCH_SIZE, ) class DeletedComicsImporter(DeletedCoversImporter): """Delete comics methods.""" @staticmethod def _init_deleted_comic_groups() -> dict: """Init deleted_comic_groups, used later even if no deletes.""" deleted_comic_groups = {} for field_name in ALL_COMIC_GROUP_FIELD_NAMES: if field_name == "story_arc_numbers": related_model = StoryArc else: related_model = Comic._meta.get_field(field_name).related_model deleted_comic_groups[related_model] = set() return deleted_comic_groups @staticmethod def _populate_deleted_comic_group(deleted_comic_groups, comic) -> None: for field_name in ALL_COMIC_GROUP_FIELD_NAMES: if field_name == "story_arc_numbers": for san in comic.story_arc_numbers.select_related("story_arc").only( "story_arc" 
                    deleted_comic_groups[StoryArc].add(san.story_arc.pk)
            elif field_name == "folders":
                for folder in comic.folders.only("pk"):
                    deleted_comic_groups[Folder].add(folder.pk)
            else:
                related_model = comic._meta.get_field(field_name).related_model
                related_id = getattr(comic, field_name).pk
                deleted_comic_groups[related_model].add(related_id)

    @classmethod
    def _populate_deleted_comic_groups(cls, delete_qs, deleted_comic_groups) -> None:
        """Populate changed groups for cover timestamp updater."""
        comics_deleted_qs = delete_qs.only(
            *ALL_COMIC_GROUP_FIELD_NAMES
        ).prefetch_related("story_arc_numbers__story_arc")
        for comic in comics_deleted_qs.iterator(
            chunk_size=IMPORTER_DELETE_MAX_CHUNK_SIZE
        ):
            cls._populate_deleted_comic_group(deleted_comic_groups, comic)

    def bulk_comics_deleted(self, **kwargs) -> tuple[int, dict]:
        """Bulk delete comics found missing from the filesystem."""
        count = 0
        deleted_comic_groups = self._init_deleted_comic_groups()
        status = ImporterRemoveComicsStatus(0, len(self.task.files_deleted))
        try:
            if not self.task.files_deleted:
                return count, deleted_comic_groups
            self.status_controller.start(status)
            # Batch path__in to stay under SQLite's variable limit.
            paths = tuple(self.task.files_deleted)
            self.task.files_deleted = frozenset()
            delete_comic_pks: set[int] = set()
            for start in range(0, len(paths), IMPORTER_LINK_FK_BATCH_SIZE):
                if self.abort_event.is_set():
                    break
                batch_paths = paths[start : start + IMPORTER_LINK_FK_BATCH_SIZE]
                delete_qs = Comic.objects.filter(
                    library=self.library, path__in=batch_paths
                )
                self._populate_deleted_comic_groups(delete_qs, deleted_comic_groups)
                delete_comic_pks.update(delete_qs.values_list("pk", flat=True))
                delete_qs.delete()
            count = len(delete_comic_pks)
            self.remove_covers(delete_comic_pks, custom=False)
        finally:
            self.status_controller.finish(status)
        return count, deleted_comic_groups


================================================
FILE: codex/librarian/scribe/importer/delete/covers.py
================================================
"""Clean up covers from the db."""

from codex.librarian.covers.tasks import CoverRemoveTask
from codex.librarian.scribe.importer.search import SearchIndexImporter
from codex.librarian.scribe.importer.statii.delete import ImporterRemoveCoversStatus
from codex.models.paths import CustomCover


class DeletedCoversImporter(SearchIndexImporter):
    """Clean up covers from the db."""

    def remove_covers(self, delete_pks, *, custom: bool) -> None:
        """Queue a remove covers task."""
        task = CoverRemoveTask(delete_pks, custom)
        self.librarian_queue.put(task)

    def bulk_covers_deleted(self, **kwargs) -> int:
        """Bulk delete comics found missing from the filesystem."""
        status = ImporterRemoveCoversStatus(
            0, len(self.task.covers_deleted), log_success=False
        )
        try:
            if not self.task.covers_deleted:
                return 0
            self.status_controller.start(status)
            covers = CustomCover.objects.filter(
                library=self.library, path__in=self.task.covers_deleted
            )
            self.task.covers_deleted = frozenset()
            delete_cover_pks = frozenset(covers.values_list("pk", flat=True))
            count, _ = covers.delete()
            self.remove_covers(delete_cover_pks, custom=True)
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/delete/folders.py
================================================
"""Delete database folders methods."""

from codex.librarian.scribe.importer.delete.comics import DeletedComicsImporter
from codex.librarian.scribe.importer.statii.delete import ImporterRemoveFoldersStatus
from codex.models.comic import Comic
from codex.models.groups import Folder


class DeletedFoldersImporter(DeletedComicsImporter):
    """Delete database folders methods."""

    def bulk_folders_deleted(self, **kwargs) -> int:
        """Bulk delete folders."""
        status = ImporterRemoveFoldersStatus(0, len(self.task.dirs_deleted))
        try:
            if not self.task.dirs_deleted:
                return 0
            self.status_controller.start(status)
            folders = Folder.objects.filter(
                library=self.library, path__in=self.task.dirs_deleted
            )
            self.task.dirs_deleted = frozenset()
            delete_comic_pks = frozenset(
                Comic.objects.filter(library=self.library, folders__in=folders)
                .distinct()
                .values_list("pk", flat=True)
            )
            folders.delete()
            count = len(delete_comic_pks)
            self.remove_covers(delete_comic_pks, custom=False)
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/failed/__init__.py
================================================
"""Failed imports."""


================================================
FILE: codex/librarian/scribe/importer/failed/create.py
================================================
"""Update and create failed imports."""

from django.db.models.functions import Now

from codex.librarian.scribe.importer.const import (
    BULK_UPDATE_FAILED_IMPORT_FIELDS,
    CREATE_FIS,
    UPDATE_FIS,
)
from codex.librarian.scribe.importer.failed.query import FailedImportsQueryImporter
from codex.librarian.scribe.importer.statii.failed import (
    ImporterFailedImportsCreateStatus,
    ImporterFailedImportsUpdateStatus,
)
from codex.models import FailedImport


class FailedImportsCreateUpdateImporter(FailedImportsQueryImporter):
    """Methods for failed imports."""

    def _bulk_update_failed_imports(
        self, status: ImporterFailedImportsUpdateStatus | None
    ) -> None:
        """Bulk update failed imports."""
        update_failed_imports = self.metadata.pop(UPDATE_FIS, None)
        try:
            if not update_failed_imports:
                return
            if status:
                self.status_controller.start(status)
            update_failed_import_objs = FailedImport.objects.filter(
                library=self.library, path__in=update_failed_imports.keys()
            ).only(*BULK_UPDATE_FAILED_IMPORT_FIELDS)
            if not update_failed_import_objs:
                return
            for fi in update_failed_import_objs:
                try:
                    exc = update_failed_imports.pop(fi.path)
                    fi.set_reason(exc)
                    fi.updated_at = Now()
                    fi.presave()
                except OSError as exc:
                    self.log.warning(f"Presaving failed import {fi.path}: {exc}")
                except Exception:
                    self.log.exception(
                        f"Error preparing failed import update for {fi.path}"
                    )
            FailedImport.objects.bulk_update(
                update_failed_import_objs, fields=BULK_UPDATE_FAILED_IMPORT_FIELDS
            )
            count = len(update_failed_import_objs)
            level = "INFO" if count else "DEBUG"
            self.log.log(level, f"Updated {count} old failed imports.")
        finally:
            self.status_controller.finish(status)

    def _bulk_create_failed_imports(
        self, status: ImporterFailedImportsCreateStatus | None
    ) -> int:
        """Bulk create failed imports."""
        create_failed_imports = self.metadata.pop(CREATE_FIS, None)
        try:
            if not create_failed_imports:
                return 0
            if status:
                self.status_controller.start(status)
            create_objs = []
            for path, exc in create_failed_imports.items():
                try:
                    fi = FailedImport(
                        library=self.library, path=path, parent_folder=None
                    )
                    fi.set_reason(exc)
                    create_objs.append(fi)
                    fi.presave()
                except OSError:
                    self.log.warning(
                        f"Error preparing failed import create for {path}: {exc}"
                    )
                except Exception:
                    self.log.exception(
                        f"Error preparing failed import create for {path}"
                    )
            count = len(create_objs)
            if count:
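                # Note: update_conflicts makes re-imported failures an upsert
                # keyed on the model's first unique_together constraint.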
                FailedImport.objects.bulk_create(
                    create_objs,
                    update_conflicts=True,
                    update_fields=BULK_UPDATE_FAILED_IMPORT_FIELDS,
                    unique_fields=FailedImport._meta.unique_together[0],
                )
            level = "INFO" if count else "DEBUG"
            self.log.log(level, f"Added {count} comics to failed imports.")
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/failed/failed.py
================================================
"""Update and create failed imports."""

from codex.librarian.scribe.importer.const import DELETE_FI_PATHS
from codex.librarian.scribe.importer.failed.create import (
    FailedImportsCreateUpdateImporter,
)
from codex.librarian.scribe.importer.statii.failed import (
    ImporterFailedImportsDeleteStatus,
)
from codex.models import FailedImport


class FailedImportsImporter(FailedImportsCreateUpdateImporter):
    """Methods for failed imports."""

    def _bulk_cleanup_failed_imports(
        self, status: ImporterFailedImportsDeleteStatus | None
    ) -> int:
        """Remove FailedImport objects that have since succeeded."""
        delete_failed_imports_paths = self.metadata.pop(DELETE_FI_PATHS, None)
        try:
            if not delete_failed_imports_paths:
                return 0
            if status:
                self.status_controller.start(status)
            # Cleanup FailedImports that were actually successful
            qs = FailedImport.objects.filter(library=self.library).filter(
                path__in=delete_failed_imports_paths
            )
            count, _ = qs.delete()
            level = "INFO" if count else "DEBUG"
            self.log.log(
                level, f"Cleaned up {count} failed imports from {self.library.path}"
            )
            return count
        finally:
            self.status_controller.finish(status)

    def fail_imports(self) -> None:
        """Handle failed imports."""
        created_count = 0
        update_status = create_status = delete_status = None
        try:
            update_status, create_status, delete_status = self._query_failed_imports()
            self._bulk_update_failed_imports(update_status)
            created_count += self._bulk_create_failed_imports(create_status)
            self._bulk_cleanup_failed_imports(delete_status)
        except Exception:
            self.log.exception("Processing failed imports")
        finally:
            self.status_controller.finish_many(
                (update_status, create_status, delete_status)
            )
        self.counts.failed_imports = created_count


================================================
FILE: codex/librarian/scribe/importer/failed/query.py
================================================
"""Update and create failed imports."""

from pathlib import Path

from codex.librarian.scribe.importer.const import (
    CREATE_FIS,
    DELETE_FI_PATHS,
    FIS,
    UPDATE_FIS,
)
from codex.librarian.scribe.importer.delete import DeletedImporter
from codex.librarian.scribe.importer.statii.failed import (
    ImporterFailedImportsCreateStatus,
    ImporterFailedImportsDeleteStatus,
    ImporterFailedImportsQueryStatus,
    ImporterFailedImportsUpdateStatus,
)
from codex.models import Comic, FailedImport


class FailedImportsQueryImporter(DeletedImporter):
    """Methods for failed imports."""

    def _query_failed_import_deletes(
        self, existing_failed_import_paths, new_fis_paths
    ):
        """Calculate Deletes."""
        untouched_failed_import_paths = existing_failed_import_paths - frozenset(
            new_fis_paths
        )
        qs = Comic.objects.filter(
            library=self.library, path__in=untouched_failed_import_paths
        ).only("path")
        succeeded_failed_imports = frozenset(qs.values_list("path", flat=True))
        possibly_missing_failed_import_paths = (
            untouched_failed_import_paths - succeeded_failed_imports
        )
        missing_failed_import_paths = set()
        for path in possibly_missing_failed_import_paths:
            name = path.casefold()
            # Case sensitive matching. exists() and is_file() are case insensitive.
            # This will fail if there is a parent directory case mismatch.
            # Rather than do a recursive solution, add it to missing if it fails.
            try:
                for path_obj in Path(path).parent.iterdir():
                    if path_obj.name.casefold() == name:
                        break
                else:
                    missing_failed_import_paths.add(path)
            except FileNotFoundError:
                missing_failed_import_paths.add(path)
        return succeeded_failed_imports | missing_failed_import_paths

    def _query_failed_imports(
        self,
    ) -> tuple[
        ImporterFailedImportsUpdateStatus | None,
        ImporterFailedImportsCreateStatus | None,
        ImporterFailedImportsDeleteStatus | None,
    ]:
        """Determine what to do with failed imports."""
        status = ImporterFailedImportsQueryStatus(0)
        update_status = create_status = delete_status = None
        try:
            self.status_controller.start(status)
            # Calculate creates and updates
            fis = self.metadata.pop(FIS, {})
            existing_failed_import_paths = set(
                FailedImport.objects.filter(library=self.library).values_list(
                    "path", flat=True
                )
            )
            status.total = len(fis) + len(existing_failed_import_paths)
            self.status_controller.update(status)
            self.metadata[UPDATE_FIS] = {}
            self.metadata[CREATE_FIS] = {}
            for path, exc in fis.items():
                if path in existing_failed_import_paths:
                    self.metadata[UPDATE_FIS][path] = exc
                else:
                    self.metadata[CREATE_FIS][path] = exc
                status.increment_complete()
                self.status_controller.update(status)
            if num_update := len(self.metadata[UPDATE_FIS]):
                update_status = ImporterFailedImportsUpdateStatus(0, num_update)
                self.status_controller.update(update_status)
            if num_create := len(self.metadata[CREATE_FIS]):
                create_status = ImporterFailedImportsCreateStatus(0, num_create)
                self.status_controller.update(create_status)
            if DELETE_FI_PATHS not in self.metadata:
                self.metadata[DELETE_FI_PATHS] = set()
            self.metadata[DELETE_FI_PATHS] |= self._query_failed_import_deletes(
                existing_failed_import_paths, fis.keys()
            )
            status.increment_complete(len(existing_failed_import_paths))
            if num_delete := len(self.metadata[DELETE_FI_PATHS]):
                delete_status = ImporterFailedImportsDeleteStatus(0, num_delete)
                self.status_controller.update(delete_status)
        finally:
            self.status_controller.finish(status, notify=True)
        return update_status, create_status, delete_status


================================================
FILE: codex/librarian/scribe/importer/finish.py
================================================
"""The main importer class."""

from time import time
from types import MappingProxyType

from django.core.cache import cache
from humanize import intcomma, naturaldelta

from codex.librarian.notifier.tasks import (
    FAILED_IMPORTS_CHANGED_TASK,
    LIBRARY_CHANGED_TASK,
)
from codex.librarian.scribe.importer.init import InitImporter
from codex.librarian.scribe.importer.statii import IMPORTER_STATII
from codex.librarian.scribe.search.status import SEARCH_INDEX_STATII
from codex.librarian.scribe.status import SCRIBE_STATII

_REPORT_MAP = MappingProxyType(
    {
        "comics_moved": "comics moved",
        "folders_moved": "folders moved",
        "covers_moved": "covers moved",
        "comic": "comics imported",
        "folders": "folders imported",
        "tags": "tags imported",
        "covers": "custom covers imported",
        "link": "tags linked",
        "link_covers": "covers linked",
        "comics_deleted": "comics deleted",
        "tags_deleted": "tags deleted",
        "folders_deleted": "folders deleted",
    }
)
_FINISH_STATII = (*IMPORTER_STATII, *SEARCH_INDEX_STATII, *SCRIBE_STATII)


class FinishImporter(InitImporter):
    """Initialize, run and finish a bulk import."""

    def _get_log_finish_changed_text(self, elapsed, elapsed_time) -> str:
        cache.clear()
        log_txt = f"Imported library {self.library.path} in {elapsed}"
        if self.counts.comic:
            cps = round(self.counts.comic / elapsed_time, 1)
            cps = intcomma(cps)
            log_txt += f" at {cps} comics per second"
        else:
            log_txt += " but no comics were imported"
        log_txt += "."
        for attr, suffix in _REPORT_MAP.items():
            if value := getattr(self.counts, attr):
                value = intcomma(value)
                log_txt += f" {value} {suffix}."
        self.librarian_queue.put(LIBRARY_CHANGED_TASK)
        return log_txt

    def _log_finish(self) -> None:
        """Log Finish."""
        elapsed_time = time() - self.start_time.timestamp()
        elapsed = naturaldelta(elapsed_time)
        if self.counts.changed():
            log_txt = self._get_log_finish_changed_text(elapsed, elapsed_time)
        else:
            log_txt = (
                f"No updates necessary for library {self.library.path}."
                f" Finished in {elapsed}."
            )
        self.log.success(log_txt)

    def finish(self) -> None:
        """Perform final tasks when the apply is done."""
        if self.abort_event.is_set():
            self.log.info("Import task aborted early.")
            self.abort_event.clear()
        self.library.end_update()
        self.status_controller.finish_many(_FINISH_STATII)
        self._log_finish()
        if self.counts.failed_imports:
            self.librarian_queue.put(FAILED_IMPORTS_CHANGED_TASK)


================================================
FILE: codex/librarian/scribe/importer/importer.py
================================================
"""The main importer class."""

from codex.librarian.scribe.importer.moved import MovedImporter

_METHODS = (
    "init_apply",
    "move_and_modify_dirs",
    "read",
    "query",
    "create_and_update",
    "link",
    "fail_imports",
    "delete",
    "full_text_search",
)


class ComicImporter(MovedImporter):
    """Initialize, run and finish a bulk import."""

    def apply(self) -> None:
        """Bulk import comics."""
        try:
            self.abort_event.clear()
            for name in _METHODS:
                method = getattr(self, name)
                method()
                if self.abort_event.is_set():
                    return
        finally:
            self.finish()


================================================
FILE: codex/librarian/scribe/importer/init.py
================================================
"""Initialize Importer."""

from dataclasses import asdict, dataclass
from multiprocessing.queues import Queue
from pathlib import Path
from time import sleep, time
from typing import Any

from django.utils.timezone import now

from codex.librarian.scribe.importer.statii.create import (
    ImporterCreateComicsStatus,
    ImporterCreateCoversStatus,
    ImporterCreateTagsStatus,
    ImporterUpdateComicsStatus,
    ImporterUpdateCoversStatus,
    ImporterUpdateTagsStatus,
)
from codex.librarian.scribe.importer.statii.delete import (
    ImporterRemoveComicsStatus,
    ImporterRemoveCoversStatus,
    ImporterRemoveFoldersStatus,
)
from codex.librarian.scribe.importer.statii.link import (
    ImporterLinkCoversStatus,
    ImporterLinkTagsStatus,
)
from codex.librarian.scribe.importer.statii.moved import (
    ImporterMoveComicsStatus,
    ImporterMoveCoversStatus,
    ImporterMoveFoldersStatus,
)
from codex.librarian.scribe.importer.statii.query import (
    ImporterQueryComicUpdatesStatus,
    ImporterQueryMissingCoversStatus,
    ImporterQueryMissingTagsStatus,
    ImporterQueryTagLinksStatus,
)
from codex.librarian.scribe.importer.statii.read import (
    ImporterAggregateStatus,
    ImporterReadComicsStatus,
)
from codex.librarian.scribe.importer.statii.search import (
    ImporterFTSCreateStatus,
    ImporterFTSUpdateStatus,
)
from codex.librarian.scribe.importer.tasks import ImportTask
from codex.librarian.scribe.search.status import SearchIndexCleanStatus
from codex.librarian.scribe.status import UpdateGroupTimestampsStatus
from codex.librarian.worker import WorkerStatusBase
from codex.models import Library
from codex.settings import LOGLEVEL

_WRITE_WAIT_EXPIRY = 60


@dataclass
class Counts:
    """Total counts of operations."""
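    # Field names double as report keys: FinishImporter's _REPORT_MAP maps
    # each one to the suffix used in the final log line.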
    comic: int = 0
    tags: int = 0
    link: int = 0
    link_covers: int = 0
    folders: int = 0
    comics_deleted: int = 0
    folders_deleted: int = 0
    tags_deleted: int = 0
    covers: int = 0
    covers_deleted: int = 0
    comics_moved: int = 0
    folders_moved: int = 0
    covers_moved: int = 0
    failed_imports: int = 0

    def _any(self, exclude_prefixes: tuple[str, ...]) -> bool:
        return any(
            value
            for key, value in asdict(self).items()
            if not key.startswith(exclude_prefixes)
        )

    def changed(self):
        """Anything changed at all."""
        return self._any(("failed",))

    def search_changed(self):
        """Is the search index out of date."""
        return self._any(
            (
                "cover",
                "failed",
            )
        )


class InitImporter(WorkerStatusBase):
    """Initial Importer."""

    def __init__(
        self, task: ImportTask, logger_, librarian_queue: Queue, db_write_lock, event
    ) -> None:
        """Initialize the import."""
        super().__init__(logger_, librarian_queue, db_write_lock)
        self.task: ImportTask = task
        self.metadata: dict[str, Any] = {}
        self.counts = Counts()
        self.library = Library.objects.only("path").get(pk=self.task.library_id)
        self.abort_event = event
        self.start_time = now()
        self._is_log_debug_task = (
            self.log.level(LOGLEVEL).no <= self.log.level("DEBUG").no
        )

    def _wait_for_filesystem_ops_to_finish(self) -> bool:
        """Watcher sends events before filesystem events finish, so wait for them."""
        started_checking = time()
        # Don't wait for deletes to complete.
        # Do wait for move, modified, create files before import.
        all_modified_paths = (
            frozenset(self.task.dirs_moved.values())
            | frozenset(self.task.files_moved.values())
            | self.task.dirs_modified
            | self.task.files_modified
            | self.task.files_created
        )
        old_total_size = -1
        total_size = 0
        wait_time = 2
        while old_total_size != total_size:
            if old_total_size > 0:
                # second time around or more
                sleep(wait_time)
                wait_time = wait_time**2
                reason = (
                    f"Waiting for files to copy before import: "
                    f"{old_total_size} != {total_size}"
                )
                self.log.debug(reason)
            if time() - started_checking > _WRITE_WAIT_EXPIRY:
                return True
            old_total_size = total_size
            total_size = 0
            for path_str in all_modified_paths:
                path = Path(path_str)
                if path.exists():
                    total_size += Path(path).stat().st_size
        return False

    #######
    # LOG #
    #######

    def _log_task_construct_dirs_log(self) -> list:
        """Construct dirs log line."""
        dirs_log = []
        if self.task.dirs_moved:
            dirs_log += [f"{len(self.task.dirs_moved)} moved"]
        if self.task.dirs_modified:
            dirs_log += [f"{len(self.task.dirs_modified)} modified"]
        if self.task.dirs_deleted:
            dirs_log += [f"{len(self.task.dirs_deleted)} deleted"]
        return dirs_log

    def _log_task_construct_comics_log(self) -> list:
        """Construct comics log line."""
        comics_log = []
        if self.task.files_moved:
            comics_log += [f"{len(self.task.files_moved)} moved"]
        if self.task.files_modified:
            comics_log += [f"{len(self.task.files_modified)} modified"]
        if self.task.files_created:
            comics_log += [f"{len(self.task.files_created)} created"]
        if self.task.files_deleted:
            comics_log += [f"{len(self.task.files_deleted)} deleted"]
        return comics_log

    def _log_task(self) -> None:
        """Log the fs watcher event self.task."""
        if not self._is_log_debug_task:
            return
        self.log.debug(f"Updating library {self.library.path}...")
        comics_log = self._log_task_construct_comics_log()
        if comics_log:
            log = "Comics: "
            log += ", ".join(comics_log)
            self.log.debug(" " + log)
        dirs_log = self._log_task_construct_dirs_log()
        if dirs_log:
            log = "Folders: "
            log += ", ".join(dirs_log)
            self.log.debug(" " + log)

    ########
    # INIT #
    ########

    def _init_librarian_status_moved(self, status_list) -> int:
        """Initialize moved statuses."""
        search_index_updates = 0
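        # Only moved files count toward search-index updates; moved dirs and
        # covers do not touch the FTS tables.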
        if self.task.dirs_moved:
            status_list += [ImporterMoveFoldersStatus(None, len(self.task.dirs_moved))]
        if self.task.files_moved:
            status_list += [ImporterMoveComicsStatus(None, len(self.task.files_moved))]
            search_index_updates += len(self.task.files_moved)
        if self.task.covers_moved:
            status_list += [ImporterMoveCoversStatus(None, len(self.task.covers_moved))]
        return search_index_updates

    def _init_if_modified_or_created(self, path, status_list) -> tuple:
        """Initialize librarian statuses for modified or created ops."""
        total_paths = len(self.task.files_modified) + len(self.task.files_created)
        status_list += [
            ImporterReadComicsStatus(0, total_paths, subtitle=path),
            ImporterAggregateStatus(0, total_paths, subtitle=path),
            ImporterQueryMissingTagsStatus(subtitle=path),
            ImporterQueryComicUpdatesStatus(subtitle=path),
            ImporterQueryTagLinksStatus(subtitle=path),
            ImporterQueryMissingCoversStatus(subtitle=path),
            ImporterCreateTagsStatus(subtitle=path),
            ImporterUpdateTagsStatus(subtitle=path),
        ]
        if self.task.covers_modified:
            status_list += [
                ImporterUpdateCoversStatus(
                    None,
                    len(self.task.covers_modified),
                    subtitle=path,
                )
            ]
        if self.task.covers_created:
            status_list += [
                ImporterCreateCoversStatus(
                    None,
                    len(self.task.covers_created),
                    subtitle=path,
                )
            ]
        if self.task.files_created or self.task.covers_created:
            status_list += [
                ImporterCreateComicsStatus(
                    None,
                    len(self.task.files_created),
                    subtitle=path,
                )
            ]
        if self.task.files_modified:
            status_list += [
                ImporterUpdateComicsStatus(
                    None,
                    len(self.task.files_modified),
                    subtitle=path,
                )
            ]
        if self.task.files_modified or self.task.files_created:
            status_list += [ImporterLinkTagsStatus(subtitle=path)]
        num_covers_linked = (
            len(self.task.covers_moved)
            + len(self.task.covers_modified)
            + len(self.task.covers_created)
        )
        if num_covers_linked:
            status_list += [
                ImporterLinkCoversStatus(
                    None,
                    num_covers_linked,
                    subtitle=path,
                )
            ]
        modified = len(self.task.files_moved) + len(self.task.files_modified)
        created = len(self.task.files_created)
        return modified, created

    def _init_librarian_status_deleted(self, status_list) -> int:
        """Init deleted statuses."""
        search_index_updates = 0
        if self.task.files_deleted:
            status_list += [
                ImporterRemoveComicsStatus(None, len(self.task.files_deleted))
            ]
            search_index_updates += len(self.task.files_deleted)
        if self.task.dirs_deleted:
            status_list += [
                ImporterRemoveFoldersStatus(None, len(self.task.dirs_deleted))
            ]
        if self.task.covers_deleted:
            status_list += [
                ImporterRemoveCoversStatus(None, len(self.task.covers_deleted))
            ]
        return search_index_updates

    @staticmethod
    def _init_librarian_status_search_index(
        comic_updates, comic_creates, comic_deletes, status_list
    ) -> None:
        """Init search index statuses."""
        status_list += [
            SearchIndexCleanStatus(total=comic_deletes),
            ImporterFTSUpdateStatus(total=comic_updates),
            ImporterFTSCreateStatus(total=comic_creates),
        ]

    def _init_librarian_status(self, path) -> None:
        """Update the librarian statuses for this task."""
        status_list = []
        moved = self._init_librarian_status_moved(status_list)
        modified, created = self._init_if_modified_or_created(path, status_list)
        deleted = self._init_librarian_status_deleted(status_list)
        status_list += [UpdateGroupTimestampsStatus()]
        self._init_librarian_status_search_index(
            modified + moved,
            created,
            deleted,
            status_list,
        )
        self.status_controller.start_many(status_list)

    def init_apply(self) -> None:
        """Initialize the library and status flags."""
        self.start_time = now()
        self.library.start_update()
        too_long = self._wait_for_filesystem_ops_to_finish()
        if too_long:
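            # Bail out rather than import half-copied files; polling again
            # later will pick the library back up.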
"Import apply waited for the filesystem to stop changing too long. " "Try polling again once files have finished copying" f" in library: {self.library.path}" ) self.log.warning(reason) return self._log_task() self._init_librarian_status(self.library.path) ================================================ FILE: codex/librarian/scribe/importer/link/__init__.py ================================================ """Bulk update m2m fields.""" from codex.librarian.scribe.importer.link.many_to_many import LinkManyToManyImporter class LinkComicsImporter(LinkManyToManyImporter): """Link comics methods.""" def link(self) -> None: """Link tags and covers.""" self.counts.link += self.link_comic_m2m_fields() if self.abort_event.is_set(): return if count := self.link_custom_covers(): self.counts.link_covers += count ================================================ FILE: codex/librarian/scribe/importer/link/const.py ================================================ """Link constants.""" from types import MappingProxyType from codex.librarian.scribe.importer.const import ( CREDITS_FIELD_NAME, IDENTIFIERS_FIELD_NAME, STORY_ARC_NUMBERS_FIELD_NAME, ) from codex.models.base import BaseModel from codex.models.groups import Imprint, Series, Volume COMPLEX_MODEL_FIELD_NAMES = ( CREDITS_FIELD_NAME, STORY_ARC_NUMBERS_FIELD_NAME, IDENTIFIERS_FIELD_NAME, ) DEFAULT_KEY_RELS = ("name",) GROUP_KEY_RELS: MappingProxyType[type[BaseModel], tuple[str, ...]] = MappingProxyType( { Imprint: ("publisher__name", "name"), Series: ("publisher__name", "imprint__name", "name"), Volume: ( "publisher__name", "imprint__name", "series__name", "name", "number_to", ), } ) ================================================ FILE: codex/librarian/scribe/importer/link/covers.py ================================================ """Link Covers.""" from pathlib import Path from codex.librarian.scribe.importer.const import ( CLASS_CUSTOM_COVER_GROUP_MAP, LINK_COVER_PKS, ) from codex.librarian.scribe.importer.failed.failed import FailedImportsImporter from codex.librarian.scribe.importer.statii.link import ImporterLinkCoversStatus from codex.models import CustomCover, Folder class LinkCoversImporter(FailedImportsImporter): """Link Covers methods.""" def _link_custom_cover_prepare(self, cover, model_map) -> None: """Prepare one cover in the model map for bulk update.""" if cover.library and cover.library.covers_only: model = CLASS_CUSTOM_COVER_GROUP_MAP.inverse.get(cover.group) if not model: self.log.warning(f"Custom Cover model not found for {cover.path}") return group_filter = {"sort_name": cover.sort_name} else: model = Folder path = str(Path(cover.path).parent) group_filter = {"path": path} qs = model.objects.filter(**group_filter).exclude(custom_cover=cover) if not qs.exists(): return if model not in model_map: model_map[model] = [] for obj in qs.iterator(): obj.custom_cover = cover model_map[model].append(obj) def _link_custom_cover_group(self, model, objs, status) -> None: """Bulk link a group to it's custom covers.""" if not objs: return model.objects.bulk_update(objs, ["custom_cover"]) status.complete += len(objs) self.status_controller.update(status) def link_custom_covers(self) -> int | None: """Link Custom Covers to Groups.""" link_cover_pks = self.metadata.get(LINK_COVER_PKS, {}) num_link_cover_pks = len(link_cover_pks) status = ImporterLinkCoversStatus(0, num_link_cover_pks) try: if not num_link_cover_pks: return 0 self.status_controller.start(status) # Aggregate objs to update for each group model. 
            model_map = {}
            covers = CustomCover.objects.filter(pk__in=link_cover_pks).only(
                "library", "path"
            )
            for cover in covers:
                self._link_custom_cover_prepare(cover, model_map)
            # Bulk update each model type
            for model, objs in model_map.items():
                self._link_custom_cover_group(model, objs, status)
        finally:
            self.status_controller.finish(status)
        return status.complete


================================================
FILE: codex/librarian/scribe/importer/link/delete.py
================================================
"""Delete stale m2ms."""

from typing import TYPE_CHECKING

from django.db.models import ManyToManyField, Q

from codex.librarian.scribe.importer.const import (
    DELETE_M2MS,
    FTS_EXISTING_M2MS,
    FTS_UPDATE,
)
from codex.librarian.scribe.importer.link.prepare import LinkComicsImporterPrepare
from codex.models import Comic
from codex.models.base import BaseModel
from codex.settings import IMPORTER_FILTER_BATCH_SIZE

if TYPE_CHECKING:
    from django.db.models.fields.related import ManyToManyRel


class LinkImporterDelete(LinkComicsImporterPrepare):
    """Delete stale m2ms."""

    @staticmethod
    def get_through_model(field: ManyToManyField) -> type[BaseModel]:
        """Get the through model for a m2m field."""
        remote_field: ManyToManyRel = field.remote_field  # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        return remote_field.through  # pyright: ignore[reportReturnType], # ty: ignore[invalid-return-type]

    def _delete_m2m_field_batch(
        self,
        column_name: str,
        through_model: type[BaseModel],
        batch_rows: tuple,
        comic_ids: set[int],
    ):
        del_filter = Q()
        for row in batch_rows:
            comic_id, model_id = row
            del_filter_dict = {"comic_id": comic_id, column_name: model_id}
            del_filter |= Q(**del_filter_dict)
            comic_ids.add(comic_id)
        qs = through_model.objects.filter(del_filter)
        count, _ = qs.delete()
        return count

    def _delete_m2m_fts_entries(self, field_name: str, comic_ids: set[int]) -> None:
        fts_field_name = (
            "story_arcs" if field_name == "story_arc_numbers" else field_name
        )
        for comic_id in comic_ids:
            if not self.metadata.get(FTS_UPDATE, {}).get(comic_id, {}).get(
                fts_field_name
            ) and not self.metadata.get(FTS_EXISTING_M2MS, {}).get(comic_id, {}).get(
                fts_field_name
            ):
                self.add_links_to_fts(comic_id, fts_field_name, ())

    def delete_m2m_field(self, field_name: str, delete_m2ms: dict, status) -> int:
        """Delete one comic field's m2m relations."""
        total_field_count = 0
        rows = tuple(delete_m2ms.pop(field_name, ()))
        num_rows = len(rows)
        if not num_rows:
            return total_field_count
        status.subtitle = f"Delete stale {field_name} links"
        self.status_controller.update(status)
        field: ManyToManyField = Comic._meta.get_field(field_name)  # pyright:ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        related_model = field.related_model
        table_name = related_model._meta.db_table
        column_name = table_name.removeprefix("codex_") + "_id"
        through_model = self.get_through_model(field)
        comic_ids = set()
        start = 0
        while start < num_rows:
            end = start + IMPORTER_FILTER_BATCH_SIZE
            batch_rows = rows[start:end]
            count = self._delete_m2m_field_batch(
                column_name, through_model, batch_rows, comic_ids
            )
            status.complete += count
            total_field_count += count
            if count:
                self.log.info(
                    f"Deleted {total_field_count}/{num_rows} stale {field_name} relations for altered comics.",
                )
                self.status_controller.update(status)
            start += IMPORTER_FILTER_BATCH_SIZE
        self._delete_m2m_fts_entries(field_name, comic_ids)
        return total_field_count

    def delete_m2ms(self, status) -> int:
        """Delete old missing m2ms."""
        delete_m2ms = self.metadata.pop(DELETE_M2MS, {})
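        # Fields are processed in sorted order so repeated runs delete in a
        # stable sequence.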
        del_total = 0
        for field_name in sorted(delete_m2ms.keys()):
            del_total += self.delete_m2m_field(field_name, delete_m2ms, status)
        if del_total:
            self.log.info(f"Deleted {del_total} stale relations for altered comics.")
        return del_total


================================================
FILE: codex/librarian/scribe/importer/link/many_to_many.py
================================================
"""Link Comics M2M fields."""

from typing import TYPE_CHECKING

from codex.librarian.scribe.importer.const import (
    DELETE_M2MS,
    LINK_M2MS,
)
from codex.librarian.scribe.importer.link.sum import LinkSumImporter
from codex.librarian.scribe.importer.statii.link import ImporterLinkTagsStatus
from codex.librarian.status import Status
from codex.models import Comic

if TYPE_CHECKING:
    from django.db.models import ManyToManyField


class LinkManyToManyImporter(LinkSumImporter):
    """Link Comics M2M fields."""

    def link_comic_m2m_field(self, field_name, m2m_links, status: Status) -> int:
        """
        Recreate an m2m field for a set of comics.

        Since we can't bulk_update or bulk_create m2m fields use a trick.
        bulk_create() on the through table:
        https://stackoverflow.com/questions/6996176/how-to-create-an-object-for-a-django-model-with-a-many-to-many-field/10116452#10116452
        https://docs.djangoproject.com/en/dev/ref/models/fields/#django.db.models.ManyToManyField.through
        """
        status.subtitle = field_name
        self.status_controller.update(status)
        field: ManyToManyField = Comic._meta.get_field(field_name)  # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        through_model = self.get_through_model(field)
        through_field_id_name = field.related_model.__name__.lower() + "_id"
        tms = []
        for comic_id, model_ids in m2m_links.items():
            for model_id in model_ids:
                args = {"comic_id": comic_id, through_field_id_name: model_id}
                tm = through_model(**args)
                tms.append(tm)
        count = len(tms)
        if count:
            update_fields = ("comic_id", through_field_id_name)
            through_model.objects.bulk_create(
                tms,
                update_conflicts=True,
                update_fields=update_fields,
                unique_fields=update_fields,
            )
            status.increment_complete(count)
            self.status_controller.update(status)
        return count

    def link_comic_m2m_fields(self) -> int:
        """Combine query and bulk link into a batch."""
        link_total = self.sum_ops(DELETE_M2MS) + self.sum_path_ops(LINK_M2MS)
        status = ImporterLinkTagsStatus(0, link_total)
        try:
            if not link_total:
                self.status_controller.finish(status)
                return link_total
            self.status_controller.start(status)
            del_total = self.delete_m2ms(status)
            # get ids for through model writing.
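            # link_prepare_m2m_links() resolves names and key tuples to pks,
            # so only through-table rows need to be written after this point.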
            all_m2m_links = self.link_prepare_m2m_links(status)
            num_links = sum(
                len(m2m_links.values()) for m2m_links in all_m2m_links.values()
            )
            status.total = num_links
            created_total = 0
            for field_name, m2m_links in all_m2m_links.items():
                if self.abort_event.is_set():
                    return created_total + del_total
                try:
                    created_total += self.link_comic_m2m_field(
                        field_name, m2m_links, status
                    )
                except Exception:
                    self.log.exception(f"Error recreating m2m field: {field_name}")
        finally:
            self.metadata.pop(LINK_M2MS, None)
            self.metadata.pop(DELETE_M2MS, None)
            self.status_controller.finish(status)
        return created_total + del_total


================================================
FILE: codex/librarian/scribe/importer/link/prepare.py
================================================
"""Prepare links with database objects."""

from collections.abc import Callable, Mapping
from typing import TYPE_CHECKING

from django.db.models.query_utils import Q

from codex.librarian.scribe.importer.const import (
    FIELD_NAME_KEYS_REL_MAP,
    FOLDERS_FIELD_NAME,
    IDENTIFIERS_FIELD_NAME,
    LINK_M2MS,
    NON_FTS_FIELDS,
)
from codex.librarian.scribe.importer.link.const import COMPLEX_MODEL_FIELD_NAMES
from codex.librarian.scribe.importer.link.covers import (
    LinkCoversImporter,
)
from codex.models import Comic
from codex.util import flatten

if TYPE_CHECKING:
    from codex.models.base import BaseModel


class LinkComicsImporterPrepare(LinkCoversImporter):
    """Prepare links with database objects."""

    @staticmethod
    def _get_link_folders_filter(_field_name, values_set) -> Q:
        """Get the ids of all folders to link."""
        folder_paths = frozenset(flatten(values_set))
        return Q(path__in=folder_paths)

    @staticmethod
    def _get_link_complex_model_filter(field_name, values_set) -> Q:
        """Get the ids of all dict style objects to link."""
        rels = FIELD_NAME_KEYS_REL_MAP[field_name]
        dict_filter = Q()
        for values in values_set:
            rel_complex = dict(zip(rels, values, strict=False))
            dict_filter |= Q(**rel_complex)
        return dict_filter

    def _add_complex_link_to_fts(
        self, comic_pk: int, field_name: str, values: frozenset
    ) -> None:
        if field_name == IDENTIFIERS_FIELD_NAME:
            # sources extraction must come before identifiers is minified,
            # but now identifiers is not indexed at all, yet sources are.
            sources = tuple(sorted({subvalues[0] for subvalues in values}))
            self.add_links_to_fts(comic_pk, "sources", sources)
        if field_name in NON_FTS_FIELDS:
            return
        field_name, fts_values = self.minify_complex_link_to_fts_tuple(
            field_name, values
        )
        self.add_links_to_fts(comic_pk, field_name, fts_values)

    def _link_prepare_complex_m2ms(
        self,
        all_m2m_links: dict,
        md: dict,
        comic_pk: int,
        field_name: str,
        link_filter_method: Callable,
    ) -> None:
        """Prepare special m2m for linking."""
        values = md.pop(field_name, None)
        if not values:
            return
        self._add_complex_link_to_fts(comic_pk, field_name, values)
        model: type[BaseModel] = Comic._meta.get_field(field_name).related_model  # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        m2m_filter = link_filter_method(field_name, values)
        pks = model.objects.filter(m2m_filter).values_list("pk", flat=True).distinct()
        if result := tuple(pks):
            if field_name not in all_m2m_links:
                all_m2m_links[field_name] = {}
            all_m2m_links[field_name][comic_pk] = result

    def _link_prepare_named_m2ms(
        self,
        all_m2m_links: dict,
        comic_pk: int,
        field_name: str,
        names: tuple[str, ...]
| frozenset[str], ) -> None: """Set the ids of all named m2m fields into the comic dict.""" model = Comic._meta.get_field(field_name).related_model if model is None: self.log.error(f"No related class found for Comic.{field_name}") return self.add_links_to_fts(comic_pk, field_name, tuple(names)) names = frozenset(name[0] for name in names) pks = ( model.objects.filter(name__in=names).values_list("pk", flat=True).distinct() ) if result := tuple(pks): if field_name not in all_m2m_links: all_m2m_links[field_name] = {} all_m2m_links[field_name][comic_pk] = result def link_prepare_m2m_links(self, status) -> Mapping: """Get the complete m2m field data to create.""" status.subtitle = "Preparing..." self.status_controller.update(status) all_m2m_links = {} comic_paths = tuple(self.metadata.get(LINK_M2MS, {}).keys()) if not comic_paths: return all_m2m_links comics = Comic.objects.filter(path__in=comic_paths).values_list("pk", "path") for comic_pk, comic_path in comics: md = self.metadata[LINK_M2MS][comic_path] self._link_prepare_complex_m2ms( all_m2m_links, md, comic_pk, FOLDERS_FIELD_NAME, self._get_link_folders_filter, ) for field_name in COMPLEX_MODEL_FIELD_NAMES: self._link_prepare_complex_m2ms( all_m2m_links, md, comic_pk, field_name, self._get_link_complex_model_filter, ) for field, names in md.items(): self._link_prepare_named_m2ms(all_m2m_links, comic_pk, field, names) self.metadata.pop(LINK_M2MS, None) return all_m2m_links ================================================ FILE: codex/librarian/scribe/importer/link/sum.py ================================================ """Total methods for updating statii.""" from codex.librarian.scribe.importer.link.delete import LinkImporterDelete class LinkSumImporter(LinkImporterDelete): """Total methods for updating statii.""" def sum_path_ops(self, key) -> int: """Sum all the operations for the key.""" count = 0 for fields in self.metadata[key].values(): for ops in fields.values(): count += len(ops) return count def sum_ops(self, key): """Sum all the operations for the key.""" return sum(len(ops) for ops in self.metadata[key].values()) ================================================ FILE: codex/librarian/scribe/importer/moved/__init__.py ================================================ """Bulk import and move comics and folders.""" from pathlib import Path from django.db.models.functions import Now from codex.librarian.scribe.importer.const import BULK_UPDATE_FOLDER_MODIFIED_FIELDS from codex.librarian.scribe.importer.moved.folders import MovedFoldersImporter from codex.librarian.scribe.importer.statii.create import ImporterUpdateTagsStatus from codex.models import Folder class MovedImporter(MovedFoldersImporter): """Methods for moving comics and folders.""" def _bulk_folders_modified(self) -> int: """Update folders stat and nothing else.""" num_dirs_modified = len(self.task.dirs_modified) if not num_dirs_modified: return 0 status = ImporterUpdateTagsStatus(None, num_dirs_modified) status.subtitle = "Folders" self.status_controller.start(status) folders = Folder.objects.filter( library=self.library, path__in=self.task.dirs_modified ).only("stat", "updated_at") self.task.dirs_modified = frozenset() update_folders = [] for folder in folders.iterator(): if Path(folder.path).exists(): folder.updated_at = Now() folder.presave() update_folders.append(folder) Folder.objects.bulk_update( update_folders, fields=BULK_UPDATE_FOLDER_MODIFIED_FIELDS ) count = len(update_folders) level = "INFO" if count else "DEBUG" self.log.log(level, f"Modified {count} folders") 
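        # Only folders still present on disk get their stat refreshed here;
        # paths that have vanished are presumably handled by the deletion
        # machinery elsewhere. The guard in miniature, with hypothetical
        # paths:
        #
        #   from pathlib import Path
        #   paths = ["/library/kept", "/library/gone"]
        #   survivors = [p for p in paths if Path(p).exists()]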
        status.complete = status.total = None
        self.status_controller.update(status)
        return count

    def move_and_modify_dirs(self) -> None:
        """Move files and dirs and modify dirs."""
        # It would be nice to move folders instead of recreating them, but it
        # would require an inode map from the snapshots to do correctly.
        self.counts.folders_moved += self.bulk_folders_moved()
        self.counts.comics_moved += self.bulk_comics_moved()
        self.counts.covers_moved += self.bulk_covers_moved()
        self.counts.folders += self._bulk_folders_modified()


================================================
FILE: codex/librarian/scribe/importer/moved/comics.py
================================================
"""Bulk import and move comics."""

from pathlib import Path

from django.db.models.functions import Now

from codex.librarian.scribe.importer.const import (
    CREATE_FKS,
    FOLDERS_FIELD_NAME,
    MOVED_BULK_COMIC_UPDATE_FIELDS,
    PARENT_FOLDER_FIELD_NAME,
    PATH_FIELD_NAME,
)
from codex.librarian.scribe.importer.read import ReadMetadataImporter
from codex.librarian.scribe.importer.statii.create import ImporterCreateTagsStatus
from codex.librarian.scribe.importer.statii.moved import ImporterMoveComicsStatus
from codex.librarian.scribe.importer.statii.query import ImporterQueryMissingTagsStatus
from codex.models import Comic, Folder


class MovedComicsImporter(ReadMetadataImporter):
    """Methods for moving comics and folders."""

    def _bulk_comics_moved_ensure_folders(self) -> None:
        """Ensure folders we're moving to exist."""
        dest_comic_paths = self.task.files_moved.values()
        dest_comic_paths = self.get_all_library_relative_paths(dest_comic_paths)
        num_dest_comic_paths = len(dest_comic_paths)
        if not num_dest_comic_paths:
            return
        # Not sending statuses to the controller for now.
        status = ImporterQueryMissingTagsStatus()
        if CREATE_FKS not in self.metadata:
            self.metadata[CREATE_FKS] = {}
        proposed_values_map = {path: set() for path in dest_comic_paths}
        self.query_missing_models(
            Folder,
            proposed_values_map,
            status,
        )
        create_folder_paths = self.metadata[CREATE_FKS].pop(Folder, {})
        count = len(create_folder_paths)
        if not count:
            return
        status = ImporterCreateTagsStatus(0, count)
        self.log.debug(
            f"Creating {count} folders for {num_dest_comic_paths} moved comics."
        )
        self.bulk_folders_create(create_folder_paths, status)
        self.status_controller.finish(status)

    def _prepare_moved_comic(
        self, comic, folder_m2m_links, updated_comics, del_folder_rows
    ) -> None:
        """Prepare one comic for bulk update."""
        try:
            new_path = self.task.files_moved[comic.path]
            old_folder_pks = frozenset(folder.pk for folder in comic.folders.all())
            comic.path = new_path
            new_path = Path(new_path)
            comic.parent_folder = Folder.objects.get(path=new_path.parent)
            comic.updated_at = Now()
            comic.presave()
            new_folder_pks = frozenset(
                Folder.objects.filter(path__in=new_path.parents).values_list(
                    "pk", flat=True
                )
            )
            folder_m2m_links[comic.pk] = new_folder_pks
            updated_comics.append(comic)
            if del_folder_pks := old_folder_pks - new_folder_pks:
                for pk in del_folder_pks:
                    del_folder_rows.append((comic.pk, pk))
        except Exception:
            self.log.exception(f"moving {comic.path}")

    def _bulk_comics_move_prepare(self) -> tuple[list, dict, dict]:
        """Prepare Update Comics."""
        comics = (
            Comic.objects.prefetch_related(FOLDERS_FIELD_NAME)
            .filter(library=self.library, path__in=self.task.files_moved.keys())
            .only(PATH_FIELD_NAME, PARENT_FOLDER_FIELD_NAME, FOLDERS_FIELD_NAME)
        )
        folder_m2m_links = {}
        updated_comics = []
        del_folder_rows = []
        for comic in comics:
            self._prepare_moved_comic(
                comic, folder_m2m_links, updated_comics, del_folder_rows
            )
        self.task.files_moved = {}
        self.log.debug(f"Prepared {len(updated_comics)} for move...")
        del_rows_map = {}
        if del_folder_rows:
            del_rows_map[FOLDERS_FIELD_NAME] = del_folder_rows
        return updated_comics, folder_m2m_links, del_rows_map

    def bulk_comics_moved(self) -> int:
        """Move comics."""
        num_files_moved = len(self.task.files_moved)
        status = ImporterMoveComicsStatus(0, num_files_moved)
        try:
            if not num_files_moved:
                return 0
            self.status_controller.start(status)
            # Prepare
            self._bulk_comics_moved_ensure_folders()
            updated_comics, folder_m2m_links, del_rows_map = (
                self._bulk_comics_move_prepare()
            )
            # Update comics
            # Potentially could just add these to the right structures and do
            # it later during create and link.
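            # Shapes of the two link structures, with hypothetical ids:
            #
            #   del_rows_map = {"folders": [(comic_pk, old_folder_pk), ...]}
            #   folder_m2m_links = {comic_pk: frozenset({new_folder_pk, ...})}
            #
            # delete_m2m_field() consumes the first and link_comic_m2m_field()
            # bulk-upserts the second right below.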
Comic.objects.bulk_update(updated_comics, MOVED_BULK_COMIC_UPDATE_FIELDS) if del_rows_map: self.delete_m2m_field(FOLDERS_FIELD_NAME, del_rows_map, status) if folder_m2m_links: self.link_comic_m2m_field(FOLDERS_FIELD_NAME, folder_m2m_links, status) count = len(updated_comics) finally: self.status_controller.finish(status) return count ================================================ FILE: codex/librarian/scribe/importer/moved/covers.py ================================================ """Bulk import and move covers.""" from pathlib import Path from django.db.models.functions import Now from codex.librarian.scribe.importer.const import ( CLASS_CUSTOM_COVER_GROUP_MAP, CUSTOM_COVER_UPDATE_FIELDS, LINK_COVER_PKS, ) from codex.librarian.scribe.importer.moved.comics import MovedComicsImporter from codex.librarian.scribe.importer.statii.moved import ImporterMoveCoversStatus from codex.models import CustomCover class MovedCoversImporter(MovedComicsImporter): """Methods for moving comics and folders.""" def _bulk_covers_moved_prepare(self, status) -> tuple[list, set]: """Create an update map for bulk update.""" covers = CustomCover.objects.filter( library=self.library, path__in=self.task.covers_moved.keys() ).only("pk", "path") if status: status.total = covers.count() moved_covers = [] unlink_pks = set() for cover in covers.iterator(): try: new_path = self.task.covers_moved[cover.path] cover.path = new_path new_path = Path(new_path) cover.updated_at = Now() cover.presave() moved_covers.append(cover) unlink_pks.add(cover.pk) except Exception: self.log.exception(f"moving {cover.path}") return moved_covers, unlink_pks def _bulk_covers_moved_unlink(self, unlink_pks) -> None: """Unlink moved covers because they could have moved between group dirs.""" if not unlink_pks: return self.log.debug(f"Unlinking {len(unlink_pks)} moved custom covers.") for model in CLASS_CUSTOM_COVER_GROUP_MAP: groups = model.objects.filter(custom_cover__in=unlink_pks) unlink_groups = [] for group in groups: group.custom_cover = None unlink_groups.append(group) if unlink_groups: model.objects.bulk_update(unlink_groups, ["custom_cover"]) self.log.debug( f"Unlinked {len(unlink_groups)} {model.__name__} moved custom covers." 
                )
        self.remove_covers(unlink_pks, custom=True)

    def bulk_covers_moved(self, status=None) -> int:
        """Move covers."""
        num_covers_moved = len(self.task.covers_moved)
        status = ImporterMoveCoversStatus(None, num_covers_moved)
        try:
            if not num_covers_moved:
                return 0
            self.status_controller.start(status)
            moved_covers, unlink_pks = self._bulk_covers_moved_prepare(status)
            if LINK_COVER_PKS not in self.metadata:
                self.metadata[LINK_COVER_PKS] = set()
            self.metadata[LINK_COVER_PKS].update(unlink_pks)
            if moved_covers:
                CustomCover.objects.bulk_update(
                    moved_covers, CUSTOM_COVER_UPDATE_FIELDS
                )
            self._bulk_covers_moved_unlink(unlink_pks)
            count = len(moved_covers)
            level = "INFO" if count else "DEBUG"
            self.log.log(level, f"Moved {count} custom covers.")
        finally:
            self.status_controller.finish(status)
        return count


================================================
FILE: codex/librarian/scribe/importer/moved/folders.py
================================================
"""Bulk import and move comics and folders."""

from pathlib import Path

from bidict import bidict, frozenbidict
from django.db.models.functions import Now

from codex.librarian.scribe.importer.const import (
    BULK_UPDATE_FOLDER_FIELDS,
)
from codex.librarian.scribe.importer.moved.covers import MovedCoversImporter
from codex.librarian.scribe.importer.statii.create import ImporterCreateTagsStatus
from codex.librarian.scribe.importer.statii.moved import ImporterMoveFoldersStatus
from codex.librarian.status import Status
from codex.models import Folder


class MovedFoldersImporter(MovedCoversImporter):
    """Methods for moving comics and folders."""

    @staticmethod
    def _folder_sort_key(element: Folder) -> int:
        return len(Path(element.path).parts)

    def _bulk_move_folders(
        self,
        src_folder_paths_with_existing_dest_parents,
        dest_parent_folders_map,
        dirs_moved: frozenbidict[str, str],
        status: Status,
    ):
        """Bulk move folders."""
        # Move collisions removed before this
        folders_to_move = (
            Folder.objects.filter(
                library=self.library,
                path__in=src_folder_paths_with_existing_dest_parents,
            )
            .only(*BULK_UPDATE_FOLDER_FIELDS)
            .order_by("path")
        )
        new_paths = set()
        update_folders = []
        for folder in folders_to_move.iterator():
            new_path = dirs_moved[folder.path]
            new_paths.add(new_path)
            folder.name = Path(new_path).name
            folder.path = new_path
            parent_path_str = str(Path(new_path).parent)
            folder.parent_folder = dest_parent_folders_map.get(parent_path_str)
            folder.presave()
            folder.updated_at = Now()
            update_folders.append(folder)
        update_folders = sorted(update_folders, key=self._folder_sort_key)
        Folder.objects.bulk_update(update_folders, BULK_UPDATE_FOLDER_FIELDS)
        count = len(update_folders)
        level = "INFO" if count else "DEBUG"
        self.log.log(level, f"Moved {count} folders.")
        status.increment_complete(count)
        self.status_controller.update(status)
        return count

    def _bulk_move_folders_under_existing_parents(
        self, dest_parent_folder_paths_map, dirs_moved: frozenbidict[str, str], status
    ) -> int:
        """Move folders under existing folders."""
        count = 0
        while True:
            # Get existing parent folders
            dest_parent_paths = tuple(dest_parent_folder_paths_map.keys())
            extant_parent_folders = Folder.objects.filter(
                library=self.library, path__in=dest_parent_paths
            ).only("path")
            # Create a list of folders that can be moved under existing folders.
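            # Each pass moves every folder whose destination parent already
            # exists; the caller then creates one missing layer and retries.
            # The shallowest-first ordering that makes this safe, in
            # miniature with hypothetical paths:
            #
            #   from pathlib import Path
            #   paths = ["/lib/a/b/c", "/lib/a", "/lib/a/b"]
            #   sorted(paths, key=lambda p: len(Path(p).parts))
            #   # -> ["/lib/a", "/lib/a/b", "/lib/a/b/c"]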
dest_parent_folders_map = {} src_folder_paths_with_existing_dest_parents = [] for extant_parent_folder in extant_parent_folders: dest_parent_folders_map[extant_parent_folder.path] = ( extant_parent_folder ) if dest_folder_paths := dest_parent_folder_paths_map.pop( extant_parent_folder.path, None ): for dest_folder_path in dest_folder_paths: src_path = dirs_moved.inverse[dest_folder_path] src_folder_paths_with_existing_dest_parents.append(src_path) if not src_folder_paths_with_existing_dest_parents: return count src_folder_paths_with_existing_dest_parents = sorted( src_folder_paths_with_existing_dest_parents ) self.log.debug( f"Moving folders under existing parents: {src_folder_paths_with_existing_dest_parents}" ) count += self._bulk_move_folders( src_folder_paths_with_existing_dest_parents, dest_parent_folders_map, dirs_moved, status, ) def _get_move_create_folders_one_layer( self, dest_parent_folder_paths_map ) -> frozenset: """Find the next layer of folder paths to create.""" create_folder_paths_one_layer = set() library_parts_len = len(Path(self.library.path).parts) for parent_path_str in sorted(dest_parent_folder_paths_map.keys()): parts = Path(parent_path_str).parts # Get one layer of folders from existing layers. possible_create_folder_path = "" for index in range(library_parts_len, len(parts) + 1): layer_parts = parts[:index] possible_create_folder_path = str(Path(*layer_parts)) if ( possible_create_folder_path in create_folder_paths_one_layer or not Folder.objects.filter( library=self.library, path=possible_create_folder_path ).exists() ): break else: possible_create_folder_path = "" if possible_create_folder_path: create_folder_paths_one_layer.add(possible_create_folder_path) return frozenset(create_folder_paths_one_layer) def _remove_move_collisions(self, dirs_moved: bidict[str, str]) -> None: """Remove moves that would collide with an existing Folder.""" dest_paths = set(dirs_moved.values()) collision_dest_paths = Folder.objects.filter( library=self.library, path__in=dest_paths ).values_list("path", flat=True) if not collision_dest_paths: return collision_dest_paths = sorted(set(collision_dest_paths)) for collision_dest_path in collision_dest_paths: dirs_moved.inverse.pop(collision_dest_path, None) self.log.warning( f"Not moving folders to destinations that would collide with existing database folders: {collision_dest_paths}" ) def _bulk_move_folders_and_create_parents(self, status) -> int: """Find folders that can be moved without creating parents.""" count = 0 dirs_moved = bidict(self.task.dirs_moved) self._remove_move_collisions(dirs_moved) dest_paths = sorted(set(dirs_moved.values())) dest_parent_folder_paths_map = {} for dest_path in dest_paths: parent = str(Path(dest_path).parent) if parent not in dest_parent_folder_paths_map: dest_parent_folder_paths_map[parent] = set() dest_parent_folder_paths_map[parent].add(dest_path) create_status = ImporterCreateTagsStatus() layer = 1 while True: self._bulk_move_folders_under_existing_parents( dest_parent_folder_paths_map, frozenbidict(dirs_moved), status ) # All folders movable without creation have moved. if not dest_parent_folder_paths_map: break self.log.debug( f"Creating intermediate folder layer {layer} to move folders." 
) create_folder_paths_one_layer = self._get_move_create_folders_one_layer( dest_parent_folder_paths_map ) # Create one layer of folders count += self.bulk_folders_create( create_folder_paths_one_layer, create_status ) layer += 1 return count def bulk_folders_moved(self, *, mark_in_progress=False) -> int: """Move folders in the database instead of recreating them.""" count = 0 num_dirs_moved = len(self.task.dirs_moved) status = ImporterMoveFoldersStatus(None, num_dirs_moved) try: if not num_dirs_moved: return count if mark_in_progress: self.library.start_update() self.status_controller.start(status) count += self._bulk_move_folders_and_create_parents(status) self.task.dirs_moved = {} finally: self.status_controller.finish(status) if mark_in_progress: self.library.end_update() return count ================================================ FILE: codex/librarian/scribe/importer/query/__init__.py ================================================ """Query missing foreign keys.""" from codex.librarian.scribe.importer.const import ( CREATE_FKS, DELETE_M2MS, FTS_EXISTING_M2MS, QUERY_MODELS, UPDATE_COMICS, UPDATE_FKS, ) from codex.librarian.scribe.importer.query.links import QueryPruneLinks from codex.librarian.scribe.importer.statii.query import QUERY_STATII class QueryForeignKeysImporter(QueryPruneLinks): """Methods for querying missing fks.""" def query(self) -> None: """Get objects to create by querying existing objects for the proposed fks.""" if QUERY_MODELS not in self.metadata: return self.metadata[UPDATE_COMICS] = {} self.metadata[CREATE_FKS] = {} self.metadata[UPDATE_FKS] = {} self.metadata[DELETE_M2MS] = {} self.metadata[FTS_EXISTING_M2MS] = {} self.log.debug( f"Querying existing foreign keys for comics in {self.library.path}" ) try: if self.abort_event.is_set(): return self.query_all_missing_models() if self.abort_event.is_set(): return self.query_update_comics() if self.abort_event.is_set(): return self.query_prune_comic_links() if self.abort_event.is_set(): return self.query_missing_custom_covers() finally: self.status_controller.finish_many(QUERY_STATII) ================================================ FILE: codex/librarian/scribe/importer/query/covers.py ================================================ """Query Missing Custom Covers.""" from codex.librarian.scribe.importer.const import ( CREATE_COVERS, UPDATE_COVERS, ) from codex.librarian.scribe.importer.create import CreateForeignKeysImporter from codex.librarian.scribe.importer.statii.create import ( ImporterCreateCoversStatus, ImporterUpdateCoversStatus, ) from codex.librarian.scribe.importer.statii.query import ( ImporterQueryMissingCoversStatus, ) from codex.models.paths import CustomCover class QueryCustomCoversImporter(CreateForeignKeysImporter): """Query Missing Custom Covers.""" def query_missing_custom_covers(self) -> None: """Identify update & create covers.""" cover_paths = self.task.covers_created | self.task.covers_modified num_cover_paths = len(cover_paths) status = ImporterQueryMissingCoversStatus(None, num_cover_paths) if not num_cover_paths: self.status_controller.finish(status) return self.log.debug(f"Querying {num_cover_paths} custom cover_paths") self.status_controller.start(status) update_covers_qs = CustomCover.objects.filter( library=self.library, path__in=cover_paths ) self.metadata[UPDATE_COVERS] = update_covers_qs update_cover_paths = frozenset(update_covers_qs.values_list("path", flat=True)) update_count = len(update_cover_paths) update_status = ImporterUpdateCoversStatus(0, update_count) 
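        # One path__in query partitions covers into updates (already in the
        # DB) and creates (everything else). The partition in miniature, with
        # hypothetical paths:
        #
        #   cover_paths = frozenset({"a.jpg", "b.jpg", "c.jpg"})
        #   update_cover_paths = frozenset({"b.jpg"})  # stand-in for DB hits
        #   cover_paths - update_cover_paths
        #   # -> {"a.jpg", "c.jpg"} queued for create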
self.status_controller.update(update_status) create_cover_paths = cover_paths - update_cover_paths self.metadata[CREATE_COVERS] = create_cover_paths create_count = len(create_cover_paths) create_status = ImporterCreateCoversStatus(0, create_count) self.status_controller.update(create_status) self.task.covers_created = self.task.covers_modified = frozenset() count = create_count + update_count if count: self.log.debug( f"Discovered {update_count} custom covers to update and {create_count} to create." ) self.status_controller.finish(status) return ================================================ FILE: codex/librarian/scribe/importer/query/filters.py ================================================ """Query the missing foreign keys methods.""" from django.db.models.query_utils import Q from codex.librarian.scribe.importer.const import DictModelType from codex.librarian.scribe.importer.query.covers import QueryCustomCoversImporter from codex.models.base import BaseModel from codex.models.groups import BrowserGroupModel class QueryForeignKeysFilterImporter(QueryCustomCoversImporter): """Query the missing foreign keys methods.""" @staticmethod def _get_query_missing_simple_filter( key_rels: tuple[str, ...], key_values: tuple[str, ...], ) -> Q: filter_args = {} rel = key_rels[0] values = frozenset(key[0] for key in key_values if key[0]) filter_args[f"{rel}__in"] = values return Q(**filter_args) @staticmethod def _query_missing_complex_model_filter( key_rels: tuple[str, ...], key_values: tuple[tuple[str | None, ...], ...], ) -> Q: """Add value filter to query filter map.""" query_filter = Q() for keys in key_values: filter_dict = {} for rel, value in zip(key_rels, keys, strict=False): # for credit val = value[0] if isinstance(value, tuple) else value if val is None: final_rel = rel + "__isnull" val = True else: final_rel = rel filter_dict[final_rel] = val query_filter |= Q(**filter_dict) return query_filter def query_missing_model_filter( self, model: type[BaseModel], key_rels: tuple[str, ...], key_value_tuples: tuple, ) -> Q: """Get filters for the model.""" if issubclass(model, DictModelType | BrowserGroupModel): fk_filter = self._query_missing_complex_model_filter( key_rels, key_value_tuples ) else: fk_filter = self._get_query_missing_simple_filter( key_rels, key_value_tuples ) return fk_filter ================================================ FILE: codex/librarian/scribe/importer/query/foreign_keys.py ================================================ """Query the missing foreign keys methods.""" from codex.librarian.scribe.importer.const import ( CREATE_FKS, FK_KEYS, MODEL_REL_MAP, MODEL_SELECT_RELATED, QUERY_MODELS, TOTAL, UPDATE_FKS, get_key_index, ) from codex.librarian.scribe.importer.query.update_fks import QueryIsUpdateImporter from codex.librarian.scribe.importer.statii.create import ( ImporterCreateTagsStatus, ImporterUpdateTagsStatus, ) from codex.librarian.scribe.importer.statii.query import ImporterQueryMissingTagsStatus from codex.librarian.status import Status from codex.models.base import BaseModel from codex.models.named import Universe from codex.settings import IMPORTER_FILTER_BATCH_SIZE from codex.util import flatten class QueryForeignKeysQueryImporter(QueryIsUpdateImporter): """Query the missing foreign keys methods.""" def query_existing_mds( self, model: type[BaseModel], batch_proposed_key_tuples ) -> dict: """Query existing metadata tables.""" key_rels: tuple[str, ...] 
= MODEL_REL_MAP[model][0] fk_filter = self.query_missing_model_filter( model, key_rels, batch_proposed_key_tuples, ) rels = MODEL_REL_MAP[model] select_related = MODEL_SELECT_RELATED.get(model, ()) fields = tuple(filter(bool, flatten(rels))) qs = model.objects qs = qs.select_related(*select_related) qs = qs.filter(fk_filter).distinct().values_list(*fields) extra_index = get_key_index(model) has_id = bool(rels[1]) existing_mds = {} for existing_values in qs: key = existing_values[:extra_index] value = existing_values[extra_index:] if has_id: identifier = value[:3] if any(value[:3]) else None value = (identifier, *value[3:]) existing_mds[key] = value return existing_mds def _query_missing_models_batch( self, model: type[BaseModel], start: int, all_proposed_key_values: tuple, proposed_values_map: dict[tuple, set[tuple]], create_values: set[tuple], update_values: set[tuple], fts_values: dict[tuple, tuple], status: Status, ) -> None: # Do this in batches so as not to exceed the 1k sqlite query depth limit end = start + IMPORTER_FILTER_BATCH_SIZE batch_proposed_key_tuples = all_proposed_key_values[start:end] num_in_batch = len(batch_proposed_key_tuples) existing_values_map = self.query_existing_mds(model, batch_proposed_key_tuples) for key_values in batch_proposed_key_tuples: proposed_extra_values_set = proposed_values_map.pop(key_values) exists = key_values in existing_values_map existing_extra_values = existing_values_map.pop(key_values, None) do_update, best_extra_values = self.query_model_best_extra_values( model, existing_extra_values, proposed_extra_values_set ) best_values = (*key_values, *best_extra_values) if exists: if do_update: update_values.add(best_values) else: create_values.add(best_values) if model is Universe: fts_values[key_values] = best_extra_values status.increment_complete(num_in_batch) self.status_controller.update(status) def _finish_query_missing( self, model: type[BaseModel], values: set | frozenset | dict, key: str, title: str, ) -> None: if values: fks = self.metadata[key] if isinstance(values, dict): if model not in fks: fks[model] = {} fks[model].update(values) else: if model not in fks: fks[model] = set() fks[model] |= values level = "INFO" else: level = "DEBUG" num = len(values) match key: case FK_KEYS.CREATE_FKS: verb = "create" case FK_KEYS.UPDATE_FKS: verb = "update" case _: verb = "" if verb: self.log.log(level, f"Prepared {num} {title} for {verb}.") def query_missing_models( self, model: type[BaseModel], proposed_values_map: dict[tuple, set[tuple]], status: Status, ) -> int: """Find missing foreign key models.""" if not proposed_values_map: return 0 num_all_proposed_values = len(proposed_values_map) proposed_key_values = tuple(proposed_values_map.keys()) start = 0 vnp = model._meta.verbose_name_plural title = vnp.title() if vnp else "" status.subtitle = title create_values = set() update_values = set() fts_values = {} while start < num_all_proposed_values: self._query_missing_models_batch( model, start, proposed_key_values, proposed_values_map, create_values, update_values, fts_values, status, ) start += IMPORTER_FILTER_BATCH_SIZE self._finish_query_missing(model, create_values, CREATE_FKS, title) self._finish_query_missing(model, update_values, UPDATE_FKS, title) status.subtitle = "" return num_all_proposed_values def _query_missing_model(self, model: type[BaseModel], status: Status) -> int: """Find missing model and update create and update sets.""" count = 0 proposed_values = self.metadata[QUERY_MODELS].pop(model, None) if not proposed_values: return 
count # Finally run the query and get only the correct create_objs return self.query_missing_models( model, proposed_values, status, ) def _set_fk_totals(self, fk_key: str, status_class) -> None: total_fks = 0 fks = self.metadata[fk_key] for rows in fks.values(): total_fks += len(rows) status = status_class(None, total_fks) self.status_controller.update(status, notify=False) self.metadata[fk_key][TOTAL] = total_fks def query_all_missing_models(self): """Find all missing foreign key models.""" num_models = self.sum_ops(QUERY_MODELS) status = ImporterQueryMissingTagsStatus(0, num_models) try: if not num_models: return num_models self.status_controller.start(status) for model in tuple(self.metadata[QUERY_MODELS].keys()): if self.abort_event.is_set(): return num_models self._query_missing_model(model, status) self.metadata.pop(QUERY_MODELS) self._set_fk_totals(CREATE_FKS, ImporterCreateTagsStatus) self._set_fk_totals(UPDATE_FKS, ImporterUpdateTagsStatus) finally: self.status_controller.finish(status) return status.complete ================================================ FILE: codex/librarian/scribe/importer/query/links.py ================================================ """Prune link actions.""" from codex.librarian.scribe.importer.const import ( LINK_FKS, LINK_M2MS, ) from codex.librarian.scribe.importer.query.links_m2m import QueryPruneLinksM2M from codex.librarian.scribe.importer.statii.query import ImporterQueryTagLinksStatus class QueryPruneLinks(QueryPruneLinksM2M): """Prune link actions.""" def query_prune_comic_links(self) -> None: """Prune links that don't need updating.""" total_query_ops = self.sum_path_ops(LINK_FKS) + self.sum_path_ops(LINK_M2MS) status = ImporterQueryTagLinksStatus(0, total_query_ops) try: if not total_query_ops: return self.status_controller.start(status) if self.abort_event.is_set(): return self.query_prune_comic_fk_links(status) if self.abort_event.is_set(): return self.query_prune_comic_m2m_links(status) finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/importer/query/links_fk.py ================================================ """Prune M2O links that don't need updating.""" from codex.librarian.scribe.importer.const import ( COMIC_FK_FIELD_NAMES, FIELD_NAME_KEYS_REL_MAP, LINK_FKS, PATH_FIELD_NAME, ) from codex.librarian.scribe.importer.query.update_comics import QueryUpdateComics from codex.models.comic import Comic from codex.settings import IMPORTER_LINK_FK_BATCH_SIZE _QUERY_LINK_FK_PRUNE_ONLY = ( PATH_FIELD_NAME, *COMIC_FK_FIELD_NAMES, ) class QueryPruneLinksFKs(QueryUpdateComics): """Prune M2O links that don't need updating.""" def pop_links_to_fts(self, path, field_name) -> None: """Pop a link to the FTS structure.""" link_key = self.metadata[LINK_FKS][path].pop(field_name) self.add_links_to_fts(path, field_name, (link_key,)) def _query_prune_comic_fk_links_protagonist( self, comic: Comic, path: str, field_name: str, key_values: tuple ) -> None: prot = key_values[0] for obj in (comic.main_character, comic.main_team): if obj and obj.name == prot: self.metadata[LINK_FKS][path].pop(field_name) break @staticmethod def _query_prune_comic_fk_links_key_equal(field_obj, key_rel, key_value) -> bool: parts = key_rel.split("__") rel_obj = field_obj key_val = None while parts: key_val = getattr(rel_obj, parts.pop(0), None) rel_obj = key_val return key_val == key_value def _query_prune_comic_fk_links_field(self, comic, path, field_name) -> None: link_dict = self.metadata[LINK_FKS].get(path) 
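        # _query_prune_comic_fk_links_key_equal() above walks a Django-style
        # "rel__subrel__attr" path with getattr(). The traversal in
        # isolation, with a hypothetical related object:
        #
        #   def resolve(obj, rel: str):
        #       for part in rel.split("__"):
        #           obj = getattr(obj, part, None)
        #       return obj
        #
        #   resolve(comic.volume, "series__name")  # hypothetical lookup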
key_values = link_dict[field_name] if field_name == "protagonist": self._query_prune_comic_fk_links_protagonist( comic, path, field_name, key_values ) return key_rels = FIELD_NAME_KEYS_REL_MAP[field_name] keys_equal = True field_obj = getattr(comic, field_name) for key_rel, key_value in zip(key_rels, key_values, strict=True): keys_equal = self._query_prune_comic_fk_links_key_equal( field_obj, key_rel, key_value ) if not keys_equal: break if keys_equal: self.metadata[LINK_FKS][path].pop(field_name) def _query_prune_comic_fk_links_comic(self, comic, status) -> None: path = comic.path path_link_fks = self.metadata[LINK_FKS].get(path) if path_link_fks is None: self.log.error( f"Tried to link foreign keys to path that's not in the LINK_FKS metadata {path}" ) return field_names = tuple(path_link_fks.keys()) for field_name in field_names: self._query_prune_comic_fk_links_field(comic, path, field_name) status.increment_complete() self.status_controller.update(status) if not self.metadata[LINK_FKS][path]: del self.metadata[LINK_FKS][path] def _query_prune_comic_fk_links_batch( self, batch_paths: tuple[str, ...], status ) -> None: comics = ( Comic.objects.filter(library=self.library, path__in=batch_paths) .select_related(*COMIC_FK_FIELD_NAMES) .only(*_QUERY_LINK_FK_PRUNE_ONLY) ) for comic in comics: self._query_prune_comic_fk_links_comic(comic, status) def query_prune_comic_fk_links(self, status) -> None: """Prune comic fk links that already exist.""" status.subtitle = "Many to One" self.status_controller.update(status) paths = tuple(self.metadata[LINK_FKS].keys()) num_paths = len(paths) start = 0 while start < num_paths: if self.abort_event.is_set(): return end = start + IMPORTER_LINK_FK_BATCH_SIZE batch_paths = paths[start:end] self._query_prune_comic_fk_links_batch(batch_paths, status) start += IMPORTER_LINK_FK_BATCH_SIZE ================================================ FILE: codex/librarian/scribe/importer/query/links_m2m.py ================================================ """Prune M2M links that don't need updating.""" from codex.librarian.scribe.importer.const import ( COMIC_M2M_FIELD_NAMES, DELETE_M2MS, FIELD_NAME_KEY_ATTRS_MAP, LINK_M2MS, ) from codex.librarian.scribe.importer.query.links_fk import QueryPruneLinksFKs from codex.models.base import BaseModel, NamedModel from codex.models.comic import Comic from codex.models.groups import BrowserGroupModel from codex.settings import ( IMPORTER_LINK_FK_BATCH_SIZE, IMPORTER_LINK_M2M_BATCH_SIZE, ) class QueryPruneLinksM2M(QueryPruneLinksFKs): """Prune M2M links that don't need updating.""" @staticmethod def _m2m_obj_to_key_tuple(key_attrs: tuple[str, ...], m2m_obj: BaseModel) -> tuple: """Create a key value tuple from a db obj.""" value_tuple = [] for attr in key_attrs: value = getattr(m2m_obj, attr) if isinstance(value, NamedModel | BrowserGroupModel): value = value.name value_tuple.append(value) return tuple(value_tuple) def _query_prune_comic_m2m_links_field_obj( self, comic: Comic, field_name: str, m2m_obj: BaseModel, kept_existing_values: set, proposed_values: set, ) -> bool: """Remove existing m2m links from the action list and add missing ones delete list.""" # transform objs into tuples key_attrs = FIELD_NAME_KEY_ATTRS_MAP[field_name] existing_key_value_tuple = self._m2m_obj_to_key_tuple(key_attrs, m2m_obj) if existing_key_value_tuple in proposed_values: # If already linked correctly, remove from action set kept_existing_values.add(existing_key_value_tuple) proposed_values.discard(existing_key_value_tuple) deleted = False else: # Delete 
existing m2m links that aren't in the new comic
            delete_m2m = (comic.pk, m2m_obj.pk)
            self.metadata[DELETE_M2MS][field_name].add(delete_m2m)
            deleted = True
        return deleted

    def _query_prune_comic_m2m_links_field(
        self, comic: Comic, field_name: str, status
    ) -> None:
        if field_name not in self.metadata[DELETE_M2MS]:
            self.metadata[DELETE_M2MS][field_name] = set()
        m2m_objs = getattr(comic, field_name).all()
        deleted = False
        kept_existing_values = set()
        proposed_values = self.metadata[LINK_M2MS][comic.path][field_name]
        for m2m_obj in m2m_objs:
            deleted |= self._query_prune_comic_m2m_links_field_obj(
                comic, field_name, m2m_obj, kept_existing_values, proposed_values
            )
            status.increment_complete()
            self.status_controller.update(status)
        if kept_existing_values and (deleted or proposed_values):
            self.add_to_fts_existing(comic.pk, field_name, tuple(kept_existing_values))
        if not self.metadata[LINK_M2MS][comic.path][field_name]:
            del self.metadata[LINK_M2MS][comic.path][field_name]

    def _query_prune_comic_m2m_links_comic(self, comic: Comic, status) -> None:
        field_names = tuple(self.metadata[LINK_M2MS][comic.path].keys())
        for field_name in field_names:
            self._query_prune_comic_m2m_links_field(comic, field_name, status)
        if not self.metadata[LINK_M2MS][comic.path]:
            del self.metadata[LINK_M2MS][comic.path]

    def _query_prune_comic_m2m_links_batch(self, paths: tuple[str], status) -> None:
        comics = (
            Comic.objects.filter(library=self.library, path__in=paths)
            .prefetch_related(*COMIC_M2M_FIELD_NAMES)
            .only(*COMIC_M2M_FIELD_NAMES)
        )
        for comic in comics.iterator(chunk_size=IMPORTER_LINK_M2M_BATCH_SIZE):
            self._query_prune_comic_m2m_links_comic(comic, status)

    def query_prune_comic_m2m_links(self, status) -> None:
        """Prune comic m2m links that already exist or should be deleted."""
        status.subtitle = "Many to Many"
        self.status_controller.update(status)
        paths = tuple(self.metadata[LINK_M2MS].keys())
        # Batch path__in to stay under SQLite's variable limit.
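        # SQLite caps the number of bound variables per statement, so the
        # loop below slices paths into IMPORTER_LINK_FK_BATCH_SIZE chunks.
        # The slicing in miniature, assuming a hypothetical batch size of 2:
        #
        #   paths = ("a", "b", "c", "d", "e")
        #   [paths[i : i + 2] for i in range(0, len(paths), 2)]
        #   # -> [("a", "b"), ("c", "d"), ("e",)]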
for start in range(0, len(paths), IMPORTER_LINK_FK_BATCH_SIZE): if self.abort_event.is_set(): return batch_paths = paths[start : start + IMPORTER_LINK_FK_BATCH_SIZE] self._query_prune_comic_m2m_links_batch(batch_paths, status) field_names = tuple(self.metadata[DELETE_M2MS].keys()) for field_name in field_names: rows = self.metadata[DELETE_M2MS][field_name] if not rows: del self.metadata[DELETE_M2MS][field_name] status.increment_complete(len(field_names)) self.status_controller.update(status) ================================================ FILE: codex/librarian/scribe/importer/query/update_comics.py ================================================ """Move comics that need only updating into correct structure.""" from codex.librarian.scribe.importer.const import ( BULK_UPDATE_COMIC_FIELDS, CREATE_COMICS, UPDATE_COMICS, ) from codex.librarian.scribe.importer.query.foreign_keys import ( QueryForeignKeysQueryImporter, ) from codex.librarian.scribe.importer.statii.create import ( ImporterCreateComicsStatus, ImporterUpdateComicsStatus, ) from codex.librarian.scribe.importer.statii.query import ImporterQueryComicUpdatesStatus from codex.librarian.status import Status from codex.models import Comic class QueryUpdateComics(QueryForeignKeysQueryImporter): """Move comics that need only updating into correct structure.""" def _query_update_comic(self, comic: Comic, status: Status) -> None: """Query for update one comic.""" proposed_comic_dict = self.metadata[CREATE_COMICS].pop(comic.path, None) if not proposed_comic_dict: self.log.warning( f"{comic.path} can be updated, but the update metadata was not found." ) return update_comic_dict = { key: value for key, value in proposed_comic_dict.items() if getattr(comic, key) != value } # Update even if update_comic_dict is empty to set stat self.metadata[UPDATE_COMICS][comic.pk] = update_comic_dict status.increment_complete() self.status_controller.update(status) def query_update_comics(self) -> None: """Pop existing comics from create & move to update if needed.""" paths = tuple(self.metadata[CREATE_COMICS].keys()) status = ImporterQueryComicUpdatesStatus(0, len(paths)) try: if not paths: return self.status_controller.start(status) comics = Comic.objects.filter(library=self.library, path__in=paths).only( *BULK_UPDATE_COMIC_FIELDS ) status.increment_complete(len(paths) - comics.count()) for comic in comics: self._query_update_comic(comic, status) if self.abort_event.is_set(): return create_status = ImporterCreateComicsStatus( None, len(self.metadata[CREATE_COMICS]) ) self.status_controller.update(create_status) update_status = ImporterUpdateComicsStatus( None, len(self.metadata[UPDATE_COMICS]) ) self.status_controller.update(update_status) finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/importer/query/update_fks.py ================================================ """Query the missing foreign keys methods.""" from comicbox.enums.comicbox import compare_identifier_source from codex.librarian.scribe.importer.const import MODEL_REL_MAP from codex.librarian.scribe.importer.query.filters import ( QueryForeignKeysFilterImporter, ) from codex.models.base import BaseModel class QueryIsUpdateImporter(QueryForeignKeysFilterImporter): """Query the missing foreign keys methods.""" @staticmethod def _query_missing_models_is_do_update_identifier( best_extra_values: tuple, proposed_extra_values: tuple, synthesized_extra_proposed_values: list, ) -> None: """Compare Identifiers.""" best_identifier = 
best_extra_values[0] if proposed_identifier := proposed_extra_values[0]: if best_identifier: best_id_source, _, best_id_key = best_identifier ( proposed_id_source, _, proposed_id_key, ) = proposed_identifier if ( compare_identifier_source(best_id_source, proposed_id_source) or best_id_key != proposed_id_key ): best_identifier = proposed_identifier else: best_identifier = proposed_identifier synthesized_extra_proposed_values.append(best_identifier) @staticmethod def _query_missing_models_is_do_update_extra( best_extra_values: tuple, proposed_extra_values: tuple, synthesized_extra_proposed_values: list, *, has_id: bool, ) -> None: """Compare extra relations.""" if has_id: best_extra_values = best_extra_values[1:] proposed_extra_values = proposed_extra_values[1:] synthesized_extra_values = [] for best_extra_value, proposed_extra_value in zip( best_extra_values, proposed_extra_values, strict=True ): if ( proposed_extra_value is not None and proposed_extra_value != best_extra_value ): synthesized_extra_values.append(proposed_extra_value) else: synthesized_extra_values.append(best_extra_value) synthesized_extra_proposed_values.extend(synthesized_extra_values) @staticmethod def _query_normalize_existing_values( existing_values_tuple: tuple, extra_index: int ) -> tuple: existing_identifier = existing_values_tuple[extra_index : extra_index + 3] if not any(existing_identifier): existing_identifier = None existing_extra_values = existing_values_tuple[extra_index + 3 :] return (existing_identifier, *existing_extra_values) @classmethod def _query_update_init_best_and_existing_values( cls, existing_extra_values_tuple: tuple | None, proposed_extra_values_tuples_set: set[tuple], ) -> tuple[tuple | None, tuple]: """ Initialize first best value. Normalize existing values and assign to best value OR initialize best values with first proposed value. 
""" if existing_extra_values_tuple: best_extra_values = existing_extra_values_tuple else: try: best_extra_values = proposed_extra_values_tuples_set.pop() except KeyError: best_extra_values = () return existing_extra_values_tuple, best_extra_values @classmethod def query_model_best_extra_values( cls, model: type[BaseModel], existing_extra_values_tuple: tuple | None, proposed_extra_values_tuples_set: set[tuple], ) -> tuple[bool, tuple]: """Find possible updates from existing.""" if not existing_extra_values_tuple and not proposed_extra_values_tuples_set: return False, () _, id_rel, *extra_rels = MODEL_REL_MAP[model] existing_extra_values, best_extra_values = ( cls._query_update_init_best_and_existing_values( existing_extra_values_tuple, proposed_extra_values_tuples_set, ) ) has_id = bool(id_rel) for extra_values_tuple in proposed_extra_values_tuples_set: synthesized_extra_proposed_values = [] if has_id: cls._query_missing_models_is_do_update_identifier( best_extra_values, extra_values_tuple, synthesized_extra_proposed_values, ) if extra_rels: cls._query_missing_models_is_do_update_extra( best_extra_values, extra_values_tuple, synthesized_extra_proposed_values, has_id=has_id, ) best_extra_values = tuple(synthesized_extra_proposed_values) do_update = best_extra_values != existing_extra_values return do_update, best_extra_values ================================================ FILE: codex/librarian/scribe/importer/read/__init__.py ================================================ """Extract and Aggregate metadata from comic archive.""" from codex.librarian.scribe.importer.read.extract import ( ExtractMetadataImporter, ) class ReadMetadataImporter(ExtractMetadataImporter): """Extract and Aggregate metadata from comics to prepare for importing.""" def read(self) -> None: """Extract and aggregate metadata.""" self.extract_metadata() self.aggregate_metadata() ================================================ FILE: codex/librarian/scribe/importer/read/aggregate_path.py ================================================ """Aggregate metadata from comics to prepare for importing.""" from comicbox.schemas.comicbox import ( COVER_DATE_KEY, DATE_KEY, NUMBER_KEY, STORE_DATE_KEY, SUFFIX_KEY, TITLE_KEY, ) from codex.librarian.scribe.importer.const import ( CREATE_COMICS, EXTRACTED, FIS, LINK_FKS, LINK_M2MS, QUERY_MODELS, ) from codex.librarian.scribe.importer.read.folders import AggregatePathMetadataImporter from codex.librarian.scribe.importer.statii.failed import ( ImporterFailedImportsQueryStatus, ) from codex.librarian.scribe.importer.statii.read import ImporterAggregateStatus _USED_COMICBOX_FIELDS = frozenset( { # "alternate_images", "age_rating", "arcs", # "bookmark", "characters", "collection_title", "country", "credits", # "credit_primaries", "critical_rating", "date", # "ext", "file_type", # extra "genres", "identifiers", # "identifier_primary_source", "imprint", "issue", "language", "locations", "metadata_mtime", # extra "monochrome", "notes", "original_format", "path", # extra # "pages", "page_count", "protagonist", # "prices", "publisher", "reading_direction", # "remainders", # "reprints", "review", # "rights", "scan_info", "series", "series_groups", "stories", "summary", "tagger", "tags", "teams", "title", "universes", # "updated_at", "volume", } ) class AggregateMetadataImporter(AggregatePathMetadataImporter): """Aggregate metadata from comics to prepare for importing.""" @staticmethod def _transform_metadata(md) -> None: for key in tuple(md.keys()): if key not in _USED_COMICBOX_FIELDS: 
md.pop(key, None) if date := md.pop(DATE_KEY, None): date.pop(COVER_DATE_KEY, None) date.pop(STORE_DATE_KEY, None) md.update(date) if issue := md.pop("issue", None): if number := issue.pop(NUMBER_KEY, None): md["issue_number"] = number if suffix := issue.pop(SUFFIX_KEY, None): md["issue_suffix"] = suffix if title := md.pop(TITLE_KEY, None): md["name"] = title def _aggregate_path(self, path, status) -> None: """Aggregate metadata for one path.""" # Prepare md = self.metadata[EXTRACTED].pop(path) self._transform_metadata(md) # Aggregate self.metadata[LINK_FKS][path] = {} self.get_fk_metadata(md, path) self.get_m2m_metadata(md, path) if md: self.get_path_metadata(md, path) self.metadata[CREATE_COMICS][str(path)] = md # Status status.increment_complete() self.status_controller.update(status) def aggregate_metadata( self, ): """Get aggregated metadata for the paths given.""" num_extracted_paths = len(self.metadata[EXTRACTED]) self.log.debug( f"Aggregating tags from {num_extracted_paths} comics in {self.library.path}..." ) status = ImporterAggregateStatus(0, num_extracted_paths) self.status_controller.start(status) # Init metadata, extract and aggregate self.metadata[QUERY_MODELS] = {} self.metadata[CREATE_COMICS] = {} self.metadata[LINK_FKS] = {} self.metadata[LINK_M2MS] = {} # Aggregate further for path in tuple(self.metadata[EXTRACTED]): if self.abort_event.is_set(): return status.complete self._aggregate_path(path, status) del self.metadata[EXTRACTED] fis = self.metadata[FIS].keys() # Set statii fi_status = ImporterFailedImportsQueryStatus(0, len(fis)) self.status_controller.update(fi_status, notify=False) self.status_controller.finish(status) return status.complete ================================================ FILE: codex/librarian/scribe/importer/read/const.py ================================================ """Aggregate Consts.""" from types import MappingProxyType from comicbox.schemas.comicbox import ( ARCS_KEY, CHARACTERS_KEY, DESIGNATION_KEY, GENRES_KEY, ID_KEY_KEY, ID_URL_KEY, IDENTIFIERS_KEY, LOCATIONS_KEY, NUMBER_KEY, PROTAGONIST_KEY, ROLES_KEY, SERIES_GROUPS_KEY, STORIES_KEY, TAGS_KEY, TEAMS_KEY, ) from django.db.models.fields import Field from codex.librarian.scribe.importer.const import ( ALL_COMIC_FK_FIELDS, CREDITS_FIELD_NAME, IDENTIFIERS_FIELD_NAME, NAME_FIELD_NAME, PROTAGONIST_FIELD_MODEL_MAP, STORY_ARC_NUMBERS_FIELD_NAME, UNIVERSES_FIELD_NAME, ) from codex.models import ( StoryArc, StoryArcNumber, Universe, ) from codex.models.identifier import ( Identifier, IdentifierSource, ) from codex.models.named import ( Character, CreditPerson, CreditRole, Genre, Location, SeriesGroup, Story, Tag, Team, ) ###### # FK # ###### _EXCLUDE_FIELD_NAMES = frozenset(PROTAGONIST_FIELD_MODEL_MAP.keys() | {"parent_folder"}) COMIC_FK_FIELD_NAMES_FIELD_MAP: MappingProxyType[str, Field] = MappingProxyType( { **{ field.name: field.related_model._meta.get_field(NAME_FIELD_NAME) for field in ALL_COMIC_FK_FIELDS if field.related_model and field.name not in _EXCLUDE_FIELD_NAMES }, PROTAGONIST_KEY: PROTAGONIST_FIELD_MODEL_MAP["main_character"]._meta.get_field( NAME_FIELD_NAME ), } ) COMIC_FK_FIELD_NAMES: frozenset[str] = frozenset(COMIC_FK_FIELD_NAMES_FIELD_MAP.keys()) ####### # M2M # ####### FIELD_NAME_TO_MD_KEY_MAP = MappingProxyType( { STORY_ARC_NUMBERS_FIELD_NAME: ARCS_KEY, } ) SIMPLE_KEY_CLASS_MAP = MappingProxyType( { SERIES_GROUPS_KEY: SeriesGroup, } ) IDENTIFIED_KEY_CLASS_MAP = MappingProxyType( { CHARACTERS_KEY: Character, GENRES_KEY: Genre, LOCATIONS_KEY: Location, STORIES_KEY: Story, 
TAGS_KEY: Tag, TEAMS_KEY: Team, } ) ID_TYPE_KEY = "id_type" # This map tells aggregator how to parse metadata into tuples for query & create. COMPLEX_FIELD_AGG_MAP: MappingProxyType[str, tuple] = MappingProxyType( { **{key: (cls.name, None, {}) for key, cls in SIMPLE_KEY_CLASS_MAP.items()}, **{ key: (cls.name, cls.identifier, {}) for key, cls in IDENTIFIED_KEY_CLASS_MAP.items() }, CREDITS_FIELD_NAME: ( CreditPerson.name, CreditPerson.identifier, { ROLES_KEY: CreditRole.name, }, ), IDENTIFIERS_FIELD_NAME: ( IdentifierSource.name, None, { ID_TYPE_KEY: "comic", ID_KEY_KEY: Identifier.key, ID_URL_KEY: Identifier.url, }, ), STORY_ARC_NUMBERS_FIELD_NAME: ( StoryArc.name, StoryArc.identifier, { NUMBER_KEY: StoryArcNumber.number, }, ), UNIVERSES_FIELD_NAME: ( Universe.name, None, { IDENTIFIERS_KEY: Universe.identifier, DESIGNATION_KEY: Universe.designation, }, ), } ) ================================================ FILE: codex/librarian/scribe/importer/read/extract.py ================================================ """Extract metadata from comic archive.""" from datetime import UTC, datetime from tarfile import TarError from types import MappingProxyType from zipfile import BadZipFile, LargeZipFile from comicbox.box import Comicbox from comicbox.exceptions import UnsupportedArchiveTypeError from comicbox.schemas.comicbox import PAGE_COUNT_KEY from py7zr.exceptions import ArchiveError as Py7zError from rarfile import Error as RarError from codex.choices.admin import AdminFlagChoices from codex.librarian.scribe.importer.const import EXTRACTED, FIS, SKIPPED from codex.librarian.scribe.importer.read.aggregate_path import ( AggregateMetadataImporter, ) from codex.librarian.scribe.importer.statii.read import ImporterReadComicsStatus from codex.models.admin import AdminFlag from codex.models.comic import Comic from codex.settings import COMICBOX_CONFIG class ExtractMetadataImporter(AggregateMetadataImporter): """Aggregate metadata from comics to prepare for importing.""" @staticmethod def _old_comic_values( old_comic_values: MappingProxyType, path: str ) -> tuple[str | None, int | None, datetime | None]: old_comic = old_comic_values.get(path, {}) old_file_type = old_comic.get("file_type") old_page_count = old_comic.get(PAGE_COUNT_KEY) old_mtime = old_comic.get("metadata_mtime") if old_mtime and ( old_mtime.tzinfo is None or old_mtime.tzinfo.utcoffset(old_mtime) is None ): old_mtime = old_mtime.replace(tzinfo=UTC) return old_file_type, old_page_count, old_mtime def _set_import_metadata_flag(self) -> bool: """Set import_metadata flag.""" if self.task.force_import_metadata: import_metadata = True else: key = AdminFlagChoices.IMPORT_METADATA.value import_metadata = AdminFlag.objects.only("on").get(key=key).on if not import_metadata: self.log.warning("Admin flag set to NOT import metadata.") return import_metadata def _extract_path_comicbox( self, path: str, old_comic_values: MappingProxyType, *, import_metadata: bool, ) -> dict: old_file_type, old_page_count, old_mtime = self._old_comic_values( old_comic_values, path ) md = {} with Comicbox(path, config=COMICBOX_CONFIG, logger=self.log) as cb: if import_metadata: new_md_mtime = cb.get_metadata_mtime() if ( not self.task.check_metadata_mtime or not new_md_mtime or not old_mtime or (new_md_mtime > old_mtime) ): md = cb.to_dict() md = md.get("comicbox", {}) md["metadata_mtime"] = new_md_mtime else: md["page_count"] = cb.get_page_count() else: md["page_count"] = cb.get_page_count() file_type = cb.get_file_type() if old_page_count == md.get("page_count"): 
md.pop("page_count") if old_file_type != file_type: md["file_type"] = file_type if md: md["path"] = path return md def _extract_path( self, path: str, old_comic_values: MappingProxyType, *, import_metadata: bool ) -> dict: """Extract metadata from comic and clean it for codex.""" md = {} failed_import = {} try: md = self._extract_path_comicbox( path, old_comic_values, import_metadata=import_metadata ) except ( UnsupportedArchiveTypeError, BadZipFile, LargeZipFile, RarError, Py7zError, TarError, OSError, ) as exc: self.log.warning(f"Failed to import {path}: {exc}") failed_import = {path: exc} except Exception as exc: self.log.exception(f"Failed to import: {path}") failed_import = {path: exc} if failed_import: self.metadata[FIS].update(failed_import) return md @staticmethod def _get_all_old_comic_values(all_paths: frozenset[str]) -> MappingProxyType: """Get some old comic values.""" old_comics = Comic.objects.filter(path__in=all_paths).values( "path", "metadata_mtime", "page_count", "file_type" ) old_comic_values = {} for old_comic in old_comics: old_path = old_comic.pop("path") old_comic_values[old_path] = old_comic return MappingProxyType(old_comic_values) def extract_metadata(self, status=None) -> int: """Extract comic metadata into memory.""" count = 0 self.metadata[SKIPPED] = set() self.metadata[EXTRACTED] = {} self.metadata[FIS] = {} all_paths = self.task.files_modified | self.task.files_created self.task.files_modified = frozenset() self.task.files_created = frozenset() total_paths = len(all_paths) status = ImporterReadComicsStatus(0, total_paths) try: if not total_paths: return count self.log.debug( f"Reading tags from {total_paths} comics in {self.library.path}..." ) self.status_controller.start(status, notify=True) # Set import_metadata flag import_metadata = self._set_import_metadata_flag() old_comic_values = self._get_all_old_comic_values(all_paths) for path in all_paths: if self.abort_event.is_set(): return count if md := self._extract_path( path, old_comic_values, import_metadata=import_metadata ): self.metadata[EXTRACTED][path] = md else: self.metadata[SKIPPED].add(path) status.increment_complete() self.status_controller.update(status) skipped_count = len(self.metadata[SKIPPED]) count = total_paths - skipped_count level = "INFO" if skipped_count else "DEBUG" self.log.log( level, f"Skipped {skipped_count} comics because metadata appears unchanged.", ) finally: self.metadata.pop(SKIPPED) self.status_controller.finish(status) return count ================================================ FILE: codex/librarian/scribe/importer/read/folders.py ================================================ """Aggregate folders and path from path.""" from collections.abc import Sequence, ValuesView from pathlib import Path from codex.librarian.scribe.importer.const import ( FOLDERS_FIELD_NAME, LINK_M2MS, PATH_FIELD_NAME, ) from codex.librarian.scribe.importer.read.many_to_many import ( AggregateManyToManyMetadataImporter, ) from codex.models.groups import Folder class AggregatePathMetadataImporter(AggregateManyToManyMetadataImporter): """Aggregate path metadata.""" def get_all_library_relative_paths( self, comic_paths: Sequence[Path | str] | ValuesView[str] ) -> set: """Get the proposed folder_paths.""" # also used by moved/comic.py:_bulk_comics_moved_ensure_folders() library_path = Path(self.library.path) proposed_folder_paths = set() for comic_path in comic_paths: for path in Path(comic_path).parents: if path.is_relative_to(library_path): proposed_folder_paths.add((str(path),)) return 
proposed_folder_paths def get_path_metadata(self, md: dict, path: Path | str) -> None: """Set the path metadata.""" proposed_folder_paths = self.get_all_library_relative_paths((path,)) for proposed_path in proposed_folder_paths: self.add_query_model(Folder, proposed_path) path_str = str(path) self.metadata[LINK_M2MS][path_str][FOLDERS_FIELD_NAME] = proposed_folder_paths md[PATH_FIELD_NAME] = path_str ================================================ FILE: codex/librarian/scribe/importer/read/foreign_keys.py ================================================ """Aggregate Browser Group Trees.""" from collections.abc import Mapping from contextlib import suppress from comicbox.enums.comicbox import IdSources from comicbox.fields.number_fields import PAGE_COUNT_KEY from comicbox.schemas.comicbox import ( ID_KEY_KEY, ID_URL_KEY, IDENTIFIERS_KEY, NAME_KEY, NUMBER_KEY, NUMBER_TO_KEY, PROTAGONIST_KEY, ) from django.db.models import Field from django.db.models.base import Model from codex.librarian.scribe.importer.const import ( GROUP_FIELD_NAMES, GROUP_FIELD_NAMES_SET, GROUP_MODEL_COUNT_FIELDS, LINK_FKS, QUERY_MODELS, ) from codex.librarian.scribe.importer.query import QueryForeignKeysImporter from codex.librarian.scribe.importer.read.const import ( COMIC_FK_FIELD_NAMES, COMIC_FK_FIELD_NAMES_FIELD_MAP, ) from codex.models.base import BaseModel from codex.models.groups import BrowserGroupModel, Volume from codex.models.identifier import Identifier, IdentifierSource from codex.util import max_none _MINIMAL_KEYS = frozenset({"file_type", PAGE_COUNT_KEY, "path"} | GROUP_FIELD_NAMES_SET) class AggregateForeignKeyMetadataImporter(QueryForeignKeysImporter): """Aggregate Browser Group Trees.""" def add_query_model( self, model: type[Model], clean_key_values: tuple, clean_extra_values: frozenset | set | None = None, ) -> None: """Add to the query models set for the model.""" if model not in self.metadata[QUERY_MODELS]: self.metadata[QUERY_MODELS][model] = {} if clean_key_values not in self.metadata[QUERY_MODELS][model]: self.metadata[QUERY_MODELS][model][clean_key_values] = set() if clean_extra_values is None: clean_extra_values = frozenset() else: clean_extra_values = frozenset(clean_extra_values) self.metadata[QUERY_MODELS][model][clean_key_values] |= clean_extra_values def get_identifier_tuple( self, model: type[BaseModel], obj: Mapping ) -> tuple | None: """Parse the first, highest-priority identifier from metadata.""" # Used by objects with identifiers, not the comic itself.
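# IdSources is iterated in declaration order, so the first source present in the metadata wins.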
identifiers = obj.get(IDENTIFIERS_KEY) if not identifiers: return None for id_source_enum in IdSources: id_source = id_source_enum.value if id_obj := identifiers.get(id_source): break else: id_source, id_obj = next(iter(identifiers.items())) if not id_obj: return None id_type = model._meta.db_table.removeprefix("codex_") id_key = id_obj.get(ID_KEY_KEY) if not id_key: return None identifier_tuple_keys = None if id_source: self.add_query_model(IdentifierSource, (id_source,)) identifier_tuple_keys = (id_source, id_type, id_key) id_url = id_obj.get(ID_URL_KEY) identifier_tuple_extra = frozenset([(id_url,)]) self.add_query_model(Identifier, identifier_tuple_keys, identifier_tuple_extra) return identifier_tuple_keys def _set_simple_fk(self, related_field: Field, value) -> tuple: value = related_field.get_prep_value(value) if value is None: return (), None return (value,), None def _set_group_tree_group( self, model: type[BrowserGroupModel], name_field: Field, group: dict | None, group_list: list, ) -> tuple: name_key = NUMBER_KEY if model == Volume else NAME_KEY if group is None: group = {} group_name = group.get(name_key, model.DEFAULT_NAME) clean_group_name = name_field.get_prep_value(group_name) group_list.append(clean_group_name) extra_vals = [] if model == Volume: number_to = group.get(NUMBER_TO_KEY, model.DEFAULT_NAME) clean_number_to = name_field.get_prep_value(number_to) group_list.append(clean_number_to) else: identifier_tuple = self.get_identifier_tuple(model, group) extra_vals.append(identifier_tuple) count_key = GROUP_MODEL_COUNT_FIELDS[model] if count_key: count = group.get(count_key) with suppress(Exception): old_count_val = ( self.metadata[QUERY_MODELS].get(model, {}).get(group_list) ) count = max_none(old_count_val, count) extra_vals.append(count) return tuple(group_list), frozenset((tuple(extra_vals),)) def get_fk_metadata(self, md, path) -> None: """Aggregate Simple Foreign Keys.""" group_list = [] # prevents skipped metadata from destroying browser group links field_names = tuple(GROUP_FIELD_NAMES) + tuple( sorted((set(md.keys()) - _MINIMAL_KEYS) & COMIC_FK_FIELD_NAMES) ) for field_name in field_names: related_field = COMIC_FK_FIELD_NAMES_FIELD_MAP[field_name] # No identifiers on many2one fks yet # md_key = FIELD_NAME_TO_MD_KEY_MAP.get(field_name, field_name) if they ever diverge model: type[BaseModel] = related_field.model # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] md_key = field_name value = md.pop(md_key, None) if issubclass(model, BrowserGroupModel): key_values, extra_values = self._set_group_tree_group( model, related_field, value, group_list ) elif value is None: continue else: key_values, extra_values = self._set_simple_fk(related_field, value) if not key_values and not extra_values: continue if md_key != PROTAGONIST_KEY: # pyright: ignore[reportUnnecessaryComparison] self.add_query_model(model, key_values, extra_values) self.metadata[LINK_FKS][path][field_name] = key_values ================================================ FILE: codex/librarian/scribe/importer/read/many_to_many.py ================================================ """Aggregate ManyToMany Metadata.""" from collections.abc import Mapping from typing import TYPE_CHECKING from comicbox.schemas.comicbox import IDENTIFIERS_KEY, NUMBER_KEY, ROLES_KEY from django.db.models import CharField, Field from django.db.models.fields.related import ManyToManyField from codex.librarian.scribe.importer.const import ( COMIC_M2M_FIELDS, CREDITS_FIELD_NAME, IDENTIFIERS_FIELD_NAME, LINK_M2MS,
STORY_ARC_NUMBERS_FIELD_NAME, get_key_index, ) from codex.librarian.scribe.importer.read.const import ( COMPLEX_FIELD_AGG_MAP, FIELD_NAME_TO_MD_KEY_MAP, ID_TYPE_KEY, ) from codex.librarian.scribe.importer.read.foreign_keys import ( AggregateForeignKeyMetadataImporter, ) from codex.models.comic import Comic from codex.models.groups import Folder from codex.models.identifier import IdentifierSource from codex.models.named import CreditRole if TYPE_CHECKING: from codex.models.base import BaseModel class AggregateManyToManyMetadataImporter(AggregateForeignKeyMetadataImporter): """Aggregate ManyToMany Metadata.""" def _get_m2m_metadata_dict_model_aggregate_sub_sub_value_identifiers( self, field_name, sub_sub_value ): field = Comic._meta.get_field(field_name) model: type[BaseModel] = field.related_model # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] return self.get_identifier_tuple(model, sub_sub_value) def _get_m2m_metadata_dict_model_aggregate_sub_sub_value_roles( self, sub_sub_field, sub_sub_value ) -> frozenset: clean_sub_sub_values = set() for sub_sub_sub_key_name, sub_sub_sub_value_obj in sub_sub_value.items(): clean_sub_sub_sub_key_name = sub_sub_field.get_prep_value( sub_sub_sub_key_name ) sub_sub_key_identifier_tuple = self.get_identifier_tuple( CreditRole, sub_sub_sub_value_obj ) clean_sub_sub_values.add( (clean_sub_sub_sub_key_name, sub_sub_key_identifier_tuple) ) return frozenset(clean_sub_sub_values) def _get_m2m_metadata_dict_model_aggregate_sub_sub_value( self, field_name: str, sub_value_obj: Mapping | None, sub_sub_md_key: str, sub_sub_field: Field, ): # sub_sub_md_key is identifiers or designation or roles or number # StoryArcNumber.number can be None. clean_sub_sub_values = None if sub_value_obj is None: return clean_sub_sub_values sub_sub_value = sub_value_obj.get(sub_sub_md_key) if sub_sub_value is None and field_name != STORY_ARC_NUMBERS_FIELD_NAME: return clean_sub_sub_values if sub_sub_md_key == IDENTIFIERS_KEY and sub_sub_value: clean_sub_sub_values = ( self._get_m2m_metadata_dict_model_aggregate_sub_sub_value_identifiers( field_name, sub_sub_value ) ) elif sub_sub_md_key == ROLES_KEY and sub_sub_value: clean_sub_sub_values = ( self._get_m2m_metadata_dict_model_aggregate_sub_sub_value_roles( sub_sub_field, sub_sub_value ) ) else: clean_sub_sub_values = sub_sub_field.get_prep_value(sub_sub_value) if sub_sub_md_key == NUMBER_KEY: clean_sub_sub_values = frozenset({clean_sub_sub_values}) return clean_sub_sub_values def _get_m2m_metadata_aggregate_sub_values_init( self, md_key: str, sub_key_name_field, sub_key_name: str, sub_key_identifier_field, sub_value_obj, ) -> tuple[tuple, list]: name_field = ( sub_key_name_field if isinstance(sub_key_name_field, CharField) else sub_key_name_field.field ) clean_sub_key_name = name_field.get_prep_value(sub_key_name) clean_sub_values = [] sub_model = None if sub_key_identifier_field: sub_model = sub_key_identifier_field.field.model sub_key_identifier_tuple = self.get_identifier_tuple( sub_model, sub_value_obj ) clean_sub_values.append(sub_key_identifier_tuple) elif md_key == IDENTIFIERS_KEY and clean_sub_key_name: sub_model = IdentifierSource clean_sub_key = (clean_sub_key_name,) if sub_model: clean_sub_values_set = ( frozenset({tuple(clean_sub_values)}) if clean_sub_values else None ) self.add_query_model(sub_model, clean_sub_key, clean_sub_values_set) return clean_sub_key, clean_sub_values def _get_roles_or_numbers( self, field, dict_field_keys, clean_sub_values, sub_value_obj ) -> set: roles_or_numbers = set() 
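# Accumulates frozenset sub-values (credit roles or story arc numbers); each one later fans the parent key out into its own query tuple in _create_clean_sub_map().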
for sub_sub_md_key, sub_sub_field in dict_field_keys.items(): # sub_sub_md_key is identifiers, designation, or roles if sub_sub_md_key == ID_TYPE_KEY: # Special injection of identifier type clean_sub_values.append(sub_sub_field) continue # Get one sub value tuple for the aggregate tuple clean_sub_sub_value = ( self._get_m2m_metadata_dict_model_aggregate_sub_sub_value( field.name, sub_value_obj, sub_sub_md_key, sub_sub_field.field, ) ) if isinstance(clean_sub_sub_value, frozenset): # Special multiplier for Roles roles_or_numbers |= clean_sub_sub_value else: clean_sub_values.append(clean_sub_sub_value) return roles_or_numbers def _create_clean_sub_map( self, field, roles_or_numbers, clean_sub_key, clean_sub_values ) -> dict: # Create sub_map with special provisions for complex types. clean_sub_map = {} if roles_or_numbers: clean_sub_key = clean_sub_key[0] if field.name == CREDITS_FIELD_NAME: # Credits for role_values in roles_or_numbers: role_keys, role_extras = role_values self.add_query_model( CreditRole, (role_keys,), frozenset({(role_extras,)}) ) clean_sub_map[(clean_sub_key, role_keys)] = set() else: # StoryArcNumbers for role_values in roles_or_numbers: clean_sub_map[(clean_sub_key, role_values)] = set() elif field.name == IDENTIFIERS_FIELD_NAME: clean_sub_key += tuple(clean_sub_values[:2]) url_value = tuple(clean_sub_values[2:]) clean_sub_value = frozenset({url_value}) clean_sub_map = {clean_sub_key: clean_sub_value} else: clean_sub_value = ( frozenset((tuple(clean_sub_values),)) if clean_sub_values else frozenset() ) clean_sub_map = {clean_sub_key: clean_sub_value} # Create query models for complex types whose keys are other types. return clean_sub_map def _get_m2m_metadata_dict_model_aggregate_sub_values( self, md_key: str, field: ManyToManyField, sub_key_name: str, sub_value_obj: Mapping | None, ): # Clean the name and, if there are sub values, get those.
# sub_key: story_arc_name_a, # sub_key: character_name_a, sub_key_name_field, sub_key_identifier_field, dict_field_keys = ( COMPLEX_FIELD_AGG_MAP[field.name] ) clean_sub_key, clean_sub_values = ( self._get_m2m_metadata_aggregate_sub_values_init( md_key, sub_key_name_field, sub_key_name, sub_key_identifier_field, sub_value_obj, ) ) roles_or_numbers = self._get_roles_or_numbers( field, dict_field_keys, clean_sub_values, sub_value_obj ) return self._create_clean_sub_map( field, roles_or_numbers, clean_sub_key, clean_sub_values ) def _get_m2m_metadata_dict_model( self, md_key: str, field: ManyToManyField, values: Mapping[str, Mapping | None] | list | tuple | set | frozenset, ) -> dict: # Process values dict for a field # {story_arc_name_a: { number: 1, identifiers: {} }, ...} # {character_name_a: { identifiers: {} }, ...} clean_values_map: dict[tuple, frozenset[tuple]] = {} if not isinstance(values, Mapping): values = dict.fromkeys(values) for sub_key_name, sub_value in values.items(): clean_sub_map = self._get_m2m_metadata_dict_model_aggregate_sub_values( md_key, field, sub_key_name, # ty: ignore[invalid-argument-type] sub_value, # ty: ignore[invalid-argument-type] ) clean_values_map.update(clean_sub_map) related_model: type[BaseModel] = field.related_model if related_model != Folder: for key, value in clean_values_map.items(): self.add_query_model(related_model, key, value) return clean_values_map def _get_m2m_metadata_for_field(self, field, md, m2m_md) -> None: md_key = FIELD_NAME_TO_MD_KEY_MAP.get(field.name, field.name) values = md.pop(md_key, None) if values is None: return if clean_values := self._get_m2m_metadata_dict_model(md_key, field, values): key_index = get_key_index(field.related_model) clean_key_values = set() for clean_val_tuple in clean_values: clean_key_values_tuple = clean_val_tuple[:key_index] deep_trimmed_key_values = [] for val in clean_key_values_tuple: # might have to look up index in the future but for now it's the default. 
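# A nested tuple collapses to its first element, the natural key.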
deep_trimmed_val = val[0] if isinstance(val, tuple) else val deep_trimmed_key_values.append(deep_trimmed_val) clean_key_values.add(tuple(deep_trimmed_key_values)) if field.name not in m2m_md: m2m_md[field.name] = set() m2m_md[field.name] |= clean_key_values def get_m2m_metadata(self, md, path) -> None: """Many_to_many fields get moved into a separate dict.""" m2m_md = {} for field in COMIC_M2M_FIELDS: self._get_m2m_metadata_for_field(field, md, m2m_md) self.metadata[LINK_M2MS][str(path)] = m2m_md ================================================ FILE: codex/librarian/scribe/importer/search/__init__.py ================================================ """Sync the fts index with the imported database.""" from codex.librarian.scribe.importer.search.prepare import ( SearchIndexPrepareImporter, ) from codex.librarian.scribe.importer.statii.search import ( ImporterFTSCreateStatus, ImporterFTSUpdateStatus, ) from codex.librarian.scribe.search.handler import SearchIndexer from codex.librarian.scribe.search.status import SearchIndexCleanStatus _STATII = (SearchIndexCleanStatus, ImporterFTSCreateStatus, ImporterFTSUpdateStatus) class SearchIndexImporter(SearchIndexPrepareImporter): """Sync the fts index with the imported database.""" def clean_fts(self) -> int: """Clean search index of any deleted comics.""" indexer = SearchIndexer( self.log, self.librarian_queue, self.db_write_lock, event=self.abort_event ) return indexer.remove_stale_records(log_success=False) def full_text_search(self) -> None: """Sync the fts index with the imported database.""" statii = (status_class() for status_class in _STATII) self.status_controller.start_many(statii) try: count = self.clean_fts() self.import_search_index(count) finally: self.status_controller.finish_many(statii) ================================================ FILE: codex/librarian/scribe/importer/search/prepare.py ================================================ """Prepare FTS update methods used in earlier import steps.""" from codex.librarian.scribe.importer.const import ( CREDITS_FIELD_NAME, FTS_CREATE, FTS_CREATED_M2MS, FTS_EXISTING_M2MS, FTS_UPDATE, NON_FTS_FIELDS, STORY_ARC_FIELD_NAME, STORY_ARC_NUMBERS_FIELD_NAME, ) from codex.librarian.scribe.importer.search.update import ( SearchIndexCreateUpdateImporter, ) from codex.util import flatten class SearchIndexPrepareImporter(SearchIndexCreateUpdateImporter): """Prepare FTS update methods used in earlier import steps.""" @staticmethod def minify_complex_link_to_fts_tuple( field_name: str, values: tuple | frozenset ) -> tuple[str, tuple]: """Only store the fts relevant parts of complex links.""" if field_name == CREDITS_FIELD_NAME: values = tuple(subvalues[0] for subvalues in values) elif field_name == STORY_ARC_NUMBERS_FIELD_NAME: field_name = STORY_ARC_FIELD_NAME + "s" return field_name, tuple(values) @staticmethod def _to_fts_tuple(values) -> tuple: return tuple( sorted(value for value in flatten(values) if isinstance(value, str)) ) def add_to_fts_existing(self, pk: int, field_name: str, values: tuple) -> None: """Add the existing values for creating a changed search entry.""" if field_name in NON_FTS_FIELDS or not values: return field_name, values = self.minify_complex_link_to_fts_tuple(field_name, values) fts_values = self._to_fts_tuple(values) if not fts_values: return if pk not in self.metadata[FTS_EXISTING_M2MS]: self.metadata[FTS_EXISTING_M2MS][pk] = {} self.metadata[FTS_EXISTING_M2MS][pk][field_name] = fts_values def add_links_to_fts( self, sub_key: int | str, field_name: str, values: tuple[str 
| tuple, ...], ) -> None: """Add a link to FTS structure.""" if field_name in NON_FTS_FIELDS: return if sub_key in self.metadata.get(FTS_UPDATE, {}): key = FTS_UPDATE elif sub_key in self.metadata.get(FTS_CREATE, {}): key = FTS_CREATE else: key = FTS_UPDATE if key not in self.metadata: self.metadata[key] = {} if sub_key not in self.metadata[key]: self.metadata[key][sub_key] = {} self.log.debug( f"FTS import anomaly, attempting FTS update for comic {sub_key} {field_name}" ) # Alternative might be kicking off an FTS sync flat_values = flatten(values) extra_values = ( self.metadata[FTS_CREATED_M2MS].get(field_name, {}).pop(flat_values, ()) ) fts_values = self._to_fts_tuple(flat_values + extra_values) self.metadata[key][sub_key][field_name] = fts_values ================================================ FILE: codex/librarian/scribe/importer/search/sync_m2m.py ================================================ """Update fts fields for updated foreign keys with non key search values.""" from django.db.models.expressions import Value from django.db.models.functions.datetime import Now from django.db.models.functions.text import Concat from loguru import logger from codex.librarian.scribe.importer.const import FTS_UPDATED_M2MS from codex.librarian.scribe.importer.finish import FinishImporter from codex.models.comic import ComicFTS from codex.models.functions import GroupConcat class SearchIndexSyncManyToManyImporter(FinishImporter): """Update fts fields for updated foreign keys with non key search values.""" @staticmethod def _to_fts_str(values) -> str: return ",".join(sorted(values)) @staticmethod def _get_fts_m2m_concat(field_name: str) -> Concat | GroupConcat: rel = "comic__" + field_name name_rel = rel + "__name" name_concat = GroupConcat( name_rel, order_by=name_rel, distinct=True, ) if field_name == "universes": exp = Concat( GroupConcat( f"{rel}__designation", distinct=True, order_by=(f"{rel}__designation"), ), Value(","), name_concat, ) else: exp = name_concat return exp def _sync_fts_for_m2m_updates_model( self, field_name: str, already_updated_comicfts_pks: tuple[int, ...], update_fields: tuple[str, ...], update_objs: list[ComicFTS], ) -> None: rel = f"comic__{field_name}__in" fts_value = self._get_fts_m2m_concat(field_name) model_pks = self.metadata[FTS_UPDATED_M2MS].pop(field_name) self.log.debug( f"Preparing {len(model_pks)} search entries for {field_name} updates." ) comicftss = ( ComicFTS.objects.filter(**{rel: model_pks}) .exclude(pk__in=already_updated_comicfts_pks) .annotate(fts_value=fts_value) .only(*update_fields) ) for comicfts in comicftss: value = comicfts.fts_value.strip(",") # pyright: ignore[reportAttributeAccessIssue] setattr(comicfts, field_name, value) comicfts.updated_at = Now() update_objs.append(comicfts) def sync_fts_for_m2m_updates( self, already_updated_comicfts_pks: tuple[int, ...] 
) -> None: """Update fts entries for foreign keys.""" try: count = 0 update_field_names = tuple(self.metadata.get(FTS_UPDATED_M2MS, {}).keys()) if not update_field_names: return update_objs = [] for field_name in update_field_names: if self.abort_event.is_set(): return self._sync_fts_for_m2m_updates_model( field_name, already_updated_comicfts_pks, update_field_names, update_objs, ) count = len(update_objs) tags = ", ".join(update_field_names) self.log.debug( f"Updating {count} search index entries for comics linked to updated tags: {tags}" ) if count: ComicFTS.objects.bulk_update(update_objs, update_field_names) level = "INFO" if count else "DEBUG" self.log.log( level, f"Updated {count} search index entries for comics linked to updated tags: {tags}.", ) except Exception as exc: logger.warning(f"Syncing FTS for M2M Updates: {exc}") logger.exception(exc) finally: self.metadata.pop(FTS_UPDATED_M2MS, None) ================================================ FILE: codex/librarian/scribe/importer/search/update.py ================================================ """Search Index update.""" from time import time from humanize import naturaldelta from codex.librarian.scribe.importer.const import ( FTS_CREATE, FTS_CREATED_M2MS, FTS_EXISTING_M2MS, FTS_UPDATE, ) from codex.librarian.scribe.importer.search.sync_m2m import ( SearchIndexSyncManyToManyImporter, ) from codex.librarian.scribe.importer.statii.search import ( ImporterFTSCreateStatus, ImporterFTSStatus, ImporterFTSUpdateStatus, ) from codex.librarian.scribe.search.const import COMICFTS_UPDATE_FIELDS from codex.librarian.scribe.search.prepare import SearchEntryPrepare from codex.librarian.status import Status from codex.models.comic import ComicFTS class SearchIndexCreateUpdateImporter(SearchIndexSyncManyToManyImporter): """Search Index update methods.""" def _create_comicfts_entry( self, pk: int, entry: dict, obj_list: list[ComicFTS], status: Status, ) -> None: if entry: existing_m2m_values = self.metadata[FTS_EXISTING_M2MS].get(pk) SearchEntryPrepare.prepare_import_fts_entry( pk, entry, existing_m2m_values, None, obj_list, status, create=True ) else: status.decrement_total() self.status_controller.update(status) def _update_comicfts_entry( self, comicfts: ComicFTS, obj_list: list[ComicFTS], status: Status, ) -> None: comic_id = comicfts.comic_id # pyright:ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if entry := self.metadata[FTS_UPDATE].pop(comic_id): existing_m2m_values = self.metadata[FTS_EXISTING_M2MS].get(comic_id) SearchEntryPrepare.prepare_import_fts_entry( comic_id, entry, existing_m2m_values, comicfts, obj_list, status, create=False, ) else: status.decrement_total() self.status_controller.update(status) def _update_search_index_operate_get_status( self, total_entries: int, *, create: bool ) -> ImporterFTSStatus: status_class = ImporterFTSCreateStatus if create else ImporterFTSUpdateStatus return status_class(total=total_entries) def _update_search_index_operate_create( self, status: ImporterFTSStatus ) -> tuple[tuple[ComicFTS, ...], tuple[int, ...]]: entries = self.metadata.pop(FTS_CREATE, {}) pks = tuple(sorted(entries.keys())) obj_list = [] if self.abort_event.is_set(): return tuple(obj_list), () for pk in pks: entry = entries.pop(pk) self._create_comicfts_entry(pk, entry, obj_list, status) return tuple(obj_list), pks def _update_search_index_operate_update( self, status: ImporterFTSStatus ) -> tuple[tuple[ComicFTS, ...], tuple[int, ...]]: obj_list = [] if pks := tuple(sorted(self.metadata.get(FTS_UPDATE,
{}).keys())): comicftss = ComicFTS.objects.filter(comic_id__in=pks) if self.abort_event.is_set(): return tuple(obj_list), () for comicfts in comicftss: self._update_comicfts_entry(comicfts, obj_list, status) if self.metadata[FTS_UPDATE]: # If updates not popped, turn them into creates. if FTS_CREATE not in self.metadata: self.metadata[FTS_CREATE] = {} self.metadata[FTS_CREATE].update(self.metadata[FTS_UPDATE]) self.metadata.pop(FTS_UPDATE) return tuple(obj_list), () def _update_search_index_create_or_update( self, obj_list: tuple[ComicFTS, ...], status, *, create: bool, ) -> int: if not obj_list or self.abort_event.is_set(): return 0 verb = "create" if create else "update" verbing = (verb[:-1] + "ing").capitalize() num_comic_fts = len(obj_list) batch_position = f"({status.complete}/{status.total})" self.log.debug(f"{verbing} {num_comic_fts} {batch_position} search entries...") if create: ComicFTS.objects.bulk_create(obj_list) else: ComicFTS.objects.bulk_update(obj_list, COMICFTS_UPDATE_FIELDS) return len(obj_list) def _update_search_index_operate(self, *, create: bool) -> int: key = FTS_CREATE if create else FTS_UPDATE total_entries = len(self.metadata.get(key, {})) status = self._update_search_index_operate_get_status( total_entries, create=create ) count = 0 try: verb = "create" if create else "update" if not total_entries: self.log.debug(f"No search entries to {verb}.") return count self.status_controller.start(status) verbing = "creating" if create else "updating" self.log.debug( f"Preparing {total_entries} comics for search index {verbing}..." ) if create: obj_list, created_comic_pks = self._update_search_index_operate_create( status ) else: obj_list, created_comic_pks = self._update_search_index_operate_update( status ) self.log.debug( f"Prepared {len(obj_list)} comics for search index {verbing}..." ) if self.abort_event.is_set(): return count count = self._update_search_index_create_or_update( obj_list, status, create=create, ) self.sync_fts_for_m2m_updates(created_comic_pks) finally: self.status_controller.finish(status) return count def _update_search_index_update(self) -> int: """Update out of date search entries.""" return self._update_search_index_operate(create=False) def _update_search_index_create(self) -> int: """Create missing search entries.""" return self._update_search_index_operate(create=True) def _update_search_index(self, cleaned_count: int) -> None: """Update or Rebuild the search index.""" start_time = time() if self.abort_event.is_set(): return updated_count = self._update_search_index_update() if self.abort_event.is_set(): return created_count = self._update_search_index_create() elapsed_time = time() - start_time elapsed = naturaldelta(elapsed_time) cleaned = f"{cleaned_count} cleaned up" if cleaned_count else "" updated = f"{updated_count} updated" if updated_count else "" created = f"{created_count} created" if created_count else "" summary_parts = filter(None, (cleaned, updated, created)) if summary := ", ".join(summary_parts): level = "INFO" log_txt = f"Search index entries {summary}" else: level = "DEBUG" log_txt = "Nothing done to Search index" log_txt += f" in {elapsed}." 
self.log.log(level, log_txt) def import_search_index(self, cleaned_count: int) -> None: """Update or Rebuild the search index.""" self.abort_event.clear() try: self._update_search_index(cleaned_count) except Exception: self.log.exception("Update search index") finally: if self.abort_event.is_set(): self.log.info("Search Index update aborted early.") self.abort_event.clear() self.metadata.pop(FTS_EXISTING_M2MS, None) self.metadata.pop(FTS_CREATED_M2MS, None) self.status_controller.finish_many( (ImporterFTSCreateStatus, ImporterFTSUpdateStatus) ) ================================================ FILE: codex/librarian/scribe/importer/statii/__init__.py ================================================ """Importer Statii.""" from codex.librarian.scribe.importer.statii.create import CREATE_STATII from codex.librarian.scribe.importer.statii.delete import REMOVE_STATII from codex.librarian.scribe.importer.statii.failed import FAILED_IMPORTS_STATII from codex.librarian.scribe.importer.statii.link import LINK_STATII from codex.librarian.scribe.importer.statii.moved import MOVED_STATII from codex.librarian.scribe.importer.statii.query import QUERY_STATII from codex.librarian.scribe.importer.statii.read import READ_STATII from codex.librarian.scribe.importer.statii.search import IMPORTER_SEARCH_INDEX_STATII IMPORTER_STATII = ( *CREATE_STATII, *REMOVE_STATII, *LINK_STATII, *MOVED_STATII, *QUERY_STATII, *READ_STATII, *IMPORTER_SEARCH_INDEX_STATII, *FAILED_IMPORTS_STATII, ) ================================================ FILE: codex/librarian/scribe/importer/statii/create.py ================================================ """Importer Create Statii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterCreateStatus(ImporterStatus, ABC): """Importer Create Statii.""" class ImporterCreateTagsStatus(ImporterCreateStatus): """Importer Create Tags Status.""" CODE = "ICT" VERB = "Create" ITEM_NAME = "tags" class ImporterUpdateTagsStatus(ImporterCreateStatus): """Importer Update Tags Status.""" CODE = "IUT" VERB = "Update" ITEM_NAME = "tags" class ImporterCreateComicsStatus(ImporterCreateStatus): """Importer Create Comics Status.""" CODE = "ICC" VERB = "Create" ITEM_NAME = "comics" class ImporterUpdateComicsStatus(ImporterCreateStatus): """Importer Update Comics Status.""" CODE = "IUC" VERB = "Update" ITEM_NAME = "comics" class ImporterCreateCoversStatus(ImporterCreateStatus): """Importer Create Covers Status.""" CODE = "ICV" VERB = "Create" ITEM_NAME = "custom covers" class ImporterUpdateCoversStatus(ImporterCreateStatus): """Importer Update Covers Status.""" CODE = "IUV" VERB = "Update" ITEM_NAME = "custom covers" CREATE_STATII = ( ImporterCreateTagsStatus, ImporterUpdateTagsStatus, ImporterCreateComicsStatus, ImporterUpdateComicsStatus, ImporterCreateCoversStatus, ImporterUpdateCoversStatus, ) ================================================ FILE: codex/librarian/scribe/importer/statii/delete.py ================================================ """Importer Remove Statii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterRemoveStatus(ImporterStatus, ABC): """Importer Remove Statii.""" VERB = "Remove" class ImporterRemoveFoldersStatus(ImporterRemoveStatus): """Importer Remove Folders Status.""" CODE = "IRF" ITEM_NAME = "folders" class ImporterRemoveComicsStatus(ImporterRemoveStatus): """Importer Remove Comics Status.""" CODE = "IRC" ITEM_NAME = "comics" class ImporterRemoveCoversStatus(ImporterRemoveStatus):
"""Importer Remove Covers Status.""" CODE = "IRV" ITEM_NAME = "custom covers" REMOVE_STATII = ( ImporterRemoveFoldersStatus, ImporterRemoveComicsStatus, ImporterRemoveCoversStatus, ) ================================================ FILE: codex/librarian/scribe/importer/statii/failed.py ================================================ """Importer Failed Imports Sattii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterFailedImportStatus(ImporterStatus, ABC): """Importer Failed Imports Statii.""" ITEM_NAME = "failed imports" class ImporterFailedImportsQueryStatus(ImporterFailedImportStatus): """Importer Failed Imports Query Statii.""" CODE = "IFQ" VERB = "Query" _verbed = "Queried" class ImporterFailedImportsUpdateStatus(ImporterFailedImportStatus): """Importer Failed Imports Update Statii.""" CODE = "IFU" VERB = "Update" class ImporterFailedImportsCreateStatus(ImporterFailedImportStatus): """Importer Failed Imports Create Statii.""" CODE = "IFC" VERB = "Mark Failed" _verbed = "Marked Failed" class ImporterFailedImportsDeleteStatus(ImporterFailedImportStatus): """Importer Failed Imports Create Statii.""" CODE = "IFD" VERB = "Clean up" _verbed = "Cleaned up" FAILED_IMPORTS_STATII = ( ImporterFailedImportsQueryStatus, ImporterFailedImportsUpdateStatus, ImporterFailedImportsCreateStatus, ImporterFailedImportsDeleteStatus, ) ================================================ FILE: codex/librarian/scribe/importer/statii/link.py ================================================ """Importer Link Statii.""" from codex.librarian.scribe.importer.status import ImporterStatus class ImporterLinkStatus(ImporterStatus): """Importer Link Statii.""" VERB = "Link" _verbed = "Linked" class ImporterLinkTagsStatus(ImporterLinkStatus): """Importer Link Tags Status.""" CODE = "ILT" ITEM_NAME = "tags" class ImporterLinkCoversStatus(ImporterLinkStatus): """Importer Link Covers Status.""" CODE = "ILV" ITEM_NAME = "custom covers" LINK_STATII = (ImporterLinkTagsStatus, ImporterLinkCoversStatus) ================================================ FILE: codex/librarian/scribe/importer/statii/moved.py ================================================ """Importer Moved Statii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterMoveStatus(ImporterStatus, ABC): """Importer Moved Status.""" VERB = "Move" class ImporterMoveFoldersStatus(ImporterMoveStatus): """Importer Moved Folder Status.""" CODE = "IMF" ITEM_NAME = "folders" class ImporterMoveComicsStatus(ImporterMoveStatus): """Importer Moved Comics Status.""" CODE = "IMC" ITEM_NAME = "comics" class ImporterMoveCoversStatus(ImporterMoveStatus): """Importer Moved Covers Status.""" CODE = "IMV" ITEM_NAME = "custom covers" MOVED_STATII = ( ImporterMoveFoldersStatus, ImporterMoveComicsStatus, ImporterMoveCoversStatus, ) ================================================ FILE: codex/librarian/scribe/importer/statii/query.py ================================================ """Importer Query Statii.""" from codex.librarian.scribe.importer.status import ImporterStatus class ImporterQueryStatus(ImporterStatus): """Importer Query Statii.""" VERB = "Query" _verbed = "Queried" class ImporterQueryMissingTagsStatus(ImporterQueryStatus): """Importer Aggregate Status.""" CODE = "IQT" ITEM_NAME = "missing tags" class ImporterQueryComicUpdatesStatus(ImporterQueryStatus): """Importer Comic Updates Status.""" CODE = "IQC" ITEM_NAME = "comics" class ImporterQueryTagLinksStatus(ImporterQueryStatus): 
"""Importer Tag Links Status.""" CODE = "IQL" ITEM_NAME = "tag links" class ImporterQueryMissingCoversStatus(ImporterQueryStatus): """Importer Missing Covers Status.""" CODE = "IQV" ITEM_NAME = "missing custom covers" QUERY_STATII = ( ImporterQueryMissingTagsStatus, ImporterQueryComicUpdatesStatus, ImporterQueryTagLinksStatus, ImporterQueryMissingCoversStatus, ) ================================================ FILE: codex/librarian/scribe/importer/statii/read.py ================================================ """Importer Read Statii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterReadStatus(ImporterStatus, ABC): """Importer Read Statii.""" ITEM_NAME = "comics" class ImporterReadComicsStatus(ImporterReadStatus): """Importer Read Status.""" CODE = "IRT" VERB = "Read tags from" _verbed = "Read tags from" class ImporterAggregateStatus(ImporterReadStatus): """Importer Aggregate Status.""" CODE = "IAT" VERB = "Aggregate tags from" _verbed = "Aggregated tags from" READ_STATII = (ImporterReadComicsStatus, ImporterAggregateStatus) ================================================ FILE: codex/librarian/scribe/importer/statii/search.py ================================================ """Importer Search Index Statii.""" from abc import ABC from codex.librarian.scribe.importer.status import ImporterStatus class ImporterFTSStatus(ImporterStatus, ABC): """Importer Search Index Statii.""" ITEM_NAME = "search index entries" class ImporterFTSUpdateStatus(ImporterFTSStatus): """Importer Update Search Index Status.""" CODE = "ISU" VERB = "Update" class ImporterFTSCreateStatus(ImporterFTSStatus): """Importer Update Search Index Status.""" CODE = "ISC" VERB = "Create" IMPORTER_SEARCH_INDEX_STATII = (ImporterFTSUpdateStatus, ImporterFTSCreateStatus) ================================================ FILE: codex/librarian/scribe/importer/status.py ================================================ """Librarian Status for scribe bulk writes.""" from abc import ABC from codex.librarian.scribe.status import ScribeStatus class ImporterStatus(ScribeStatus, ABC): """Importer Status.""" ================================================ FILE: codex/librarian/scribe/importer/tasks.py ================================================ """DB Import Tasks.""" from collections.abc import Mapping from dataclasses import dataclass, field from codex.librarian.scribe.tasks import ScribeTask @dataclass class ImportTask(ScribeTask): """For sending to the importer.""" PRIORITY = 100 library_id: int dirs_moved: Mapping[str, str] = field(default_factory=dict) dirs_modified: frozenset[str] = frozenset() # dirs_created: frozenset[str] | None = frozenset() # noqa: ERA001 dirs_deleted: frozenset[str] = frozenset() files_moved: Mapping[str, str] = field(default_factory=dict) files_modified: frozenset[str] = frozenset() files_created: frozenset[str] = frozenset() files_deleted: frozenset[str] = frozenset() covers_moved: Mapping[str, str] = field(default_factory=dict) covers_modified: frozenset[str] = frozenset() covers_created: frozenset[str] = frozenset() covers_deleted: frozenset[str] = frozenset() force_import_metadata: bool = False check_metadata_mtime: bool = True def total(self) -> int: """Total number of operations.""" return ( len(self.dirs_moved) + len(self.dirs_modified) + len(self.dirs_deleted) + len(self.files_moved) + len(self.files_modified) + len(self.files_created) + len(self.files_deleted) + len(self.covers_moved) + len(self.covers_modified) + len(self.covers_created) + 
len(self.covers_deleted) ) ================================================ FILE: codex/librarian/scribe/janitor/__init__.py ================================================ """Janitor tasks.""" ================================================ FILE: codex/librarian/scribe/janitor/adopt_folders.py ================================================ """Bulk import and move comics and folders.""" from pathlib import Path from codex.librarian.notifier.tasks import LIBRARY_CHANGED_TASK from codex.librarian.scribe.importer.importer import ComicImporter from codex.librarian.scribe.importer.statii.moved import ImporterMoveFoldersStatus from codex.librarian.scribe.importer.tasks import ImportTask from codex.librarian.scribe.janitor.status import JanitorAdoptOrphanFoldersStatus from codex.librarian.scribe.search.tasks import SearchIndexSyncTask from codex.librarian.worker import WorkerStatusAbortableBase from codex.models import Folder, Library class OrphanFolderAdopter(WorkerStatusAbortableBase): """A worker that adopts orphan folders into their correct place in the folder tree.""" def _adopt_orphan_folders_for_library(self, library) -> tuple[bool, int]: """Adopt orphan folders for one library.""" count = 0 orphan_folder_paths = ( Folder.objects.filter(library=library, parent_folder=None) .exclude(path=library.path) .values_list("path", flat=True) ) # Move in place # Exclude deleted folders folders_moved = { path: path for path in orphan_folder_paths if Path(path).is_dir() } if folders_moved: self.log.debug( f"{len(folders_moved)} orphan folders found in {library.path}" ) else: self.log.debug(f"No orphan folders in {library.path}") return False, count # An abridged import task. task = ImportTask(library_id=library.pk, dirs_moved=folders_moved) importer = ComicImporter( task, self.log, self.librarian_queue, self.db_write_lock, self.abort_event ) count = importer.bulk_folders_moved(mark_in_progress=True) return True, count def adopt_orphan_folders(self) -> None: """Find orphan folders and move them into their correct place.""" self.abort_event.clear() status = JanitorAdoptOrphanFoldersStatus() moved_status = ImporterMoveFoldersStatus() total_count = 0 try: self.status_controller.start_many((status, moved_status)) libraries = Library.objects.filter(covers_only=False).only("path") for library in libraries.iterator(): folders_left = True while folders_left: if self.abort_event.is_set(): return # Run until there are no orphan folders folders_left, count = self._adopt_orphan_folders_for_library( library ) total_count += count finally: self.status_controller.finish_many((moved_status, status)) if total_count: self.librarian_queue.put(LIBRARY_CHANGED_TASK) task = SearchIndexSyncTask() self.librarian_queue.put(task) if self.abort_event.is_set(): self.log.debug("Adopt Orphan Folders aborted early.") self.abort_event.clear() ================================================ FILE: codex/librarian/scribe/janitor/cleanup.py ================================================ """Clean up the database after moves or imports.""" from pathlib import Path from types import MappingProxyType from django.contrib.sessions.models import Session from django.db.models.functions.datetime import Now from codex.librarian.scribe.janitor.failed_imports import JanitorUpdateFailedImports from codex.librarian.scribe.janitor.status import ( JanitorCleanupBookmarksStatus, JanitorCleanupCoversStatus, JanitorCleanupSessionsStatus, JanitorCleanupSettingsStatus, JanitorCleanupTagsStatus, ) from codex.models import ( AgeRating, Character, Country, Credit, CreditPerson,
CreditRole, Folder, Genre, Identifier, IdentifierSource, Imprint, Language, Location, OriginalFormat, Publisher, ScanInfo, Series, SeriesGroup, Story, StoryArc, StoryArcNumber, Tag, Tagger, Team, Universe, Volume, ) from codex.models.bookmark import Bookmark from codex.models.paths import CustomCover from codex.models.settings import SettingsBrowser, SettingsReader _FK_MODELS = ( Identifier, AgeRating, Country, CreditRole, CreditPerson, StoryArc, IdentifierSource, Credit, Character, Genre, Folder, Language, Location, Imprint, OriginalFormat, Publisher, Series, SeriesGroup, ScanInfo, StoryArcNumber, Story, Tagger, Tag, Team, Volume, Universe, ) _TOTAL_NUM_FK_CLASSES = len(_FK_MODELS) def _create_reverse_rel_map_for_model(model, rel_map) -> None: rev_rels = [] filter_dict = {} for field in model._meta.get_fields(): if not field.auto_created: continue if hasattr(field, "get_accessor_name") and (an := field.get_accessor_name()): rel = an elif field.name: rel = field.name else: continue if rel == "id": continue rel = rel.removesuffix("_set") rev_rels.append(rel) filter_dict[f"{rel}__isnull"] = True if filter_dict: rel_map[model] = filter_dict def _create_reverse_rel_map() -> MappingProxyType: rel_map = {} for model in _FK_MODELS: _create_reverse_rel_map_for_model(model, rel_map) return MappingProxyType(rel_map) _MODEL_REVERSE_EMPTY_FILTER_MAP = _create_reverse_rel_map() _BOOKMARK_FILTER = dict.fromkeys( (f"{rel}__isnull" for rel in ("session", "user", "comic")), True ) _SETTINGS_ORPHAN_FILTER = dict.fromkeys( (f"{rel}__isnull" for rel in ("session", "user")), True ) class JanitorCleanup(JanitorUpdateFailedImports): """Cleanup methods for Janitor.""" def _cleanup_fks_model(self, model, filter_dict, status): status.subtitle = model._meta.verbose_name_plural self.status_controller.update(status) qs = model.objects.filter(**filter_dict).distinct() count, _ = qs.delete() status.complete += count self.status_controller.update(status) return count def _cleanup_fks_one_level(self, status) -> int: count = 0 for model, filter_dict in _MODEL_REVERSE_EMPTY_FILTER_MAP.items(): if self.abort_event.is_set(): return count count += self._cleanup_fks_model(model, filter_dict, status) return count def cleanup_fks(self) -> None: """Clean up unused foreign keys.""" self.abort_event.clear() status = JanitorCleanupTagsStatus(0) try: self.status_controller.start(status) self.log.debug("Cleaning up orphan tags...") count = 1 while count: # Keep churning until we stop finding orphan tags. 
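# Deleting one level of orphans can orphan the rows they pointed to, so loop until a pass deletes nothing.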
count = self._cleanup_fks_one_level(status) level = "INFO" if status.complete else "DEBUG" self.log.log(level, f"Cleaned up {status.complete} unused tags.") finally: if self.abort_event.is_set(): self.log.info("Cleanup tags task aborted early.") self.abort_event.clear() self.status_controller.finish(status) def cleanup_custom_covers(self) -> None: """Clean up unused custom covers.""" covers = CustomCover.objects.only("path") status = JanitorCleanupCoversStatus(0, covers.count()) delete_pks = [] try: self.status_controller.start(status) self.log.debug("Cleaning up db custom covers with no source images...") for cover in covers.iterator(): if not Path(cover.path).exists(): delete_pks.append(cover.pk) status.increment_complete() delete_qs = CustomCover.objects.filter(pk__in=delete_pks) count, _ = delete_qs.delete() status.complete = count finally: self.status_controller.finish(status) def cleanup_sessions(self) -> None: """Delete corrupt sessions.""" status = JanitorCleanupSessionsStatus() try: self.status_controller.start(status) qs = Session.objects.filter(expire_date__lt=Now()) count, _ = qs.delete() if count: self.log.info(f"Deleted {count} expired sessions.") bad_session_keys = set() for encoded_session in Session.objects.all(): session = encoded_session.get_decoded() if not session: bad_session_keys.add(encoded_session.session_key) if bad_session_keys: bad_sessions = Session.objects.filter(session_key__in=bad_session_keys) count, _ = bad_sessions.delete() self.log.info(f"Deleted {count} corrupt sessions.") finally: self.status_controller.finish(status) def cleanup_orphan_bookmarks(self) -> None: """Delete bookmarks without users or sessions.""" status = JanitorCleanupBookmarksStatus() try: self.status_controller.start(status) orphan_bms = Bookmark.objects.filter(**_BOOKMARK_FILTER) count, _ = orphan_bms.delete() level = "INFO" if count else "DEBUG" self.log.log(level, f"Deleted {count} orphan bookmarks.") finally: self.status_controller.finish(status) def cleanup_orphan_settings(self) -> None: """Delete settings rows without both a user and a session.""" status = JanitorCleanupSettingsStatus() try: self.status_controller.start(status) total = 0 for model in (SettingsBrowser, SettingsReader): orphans = model.objects.filter(**_SETTINGS_ORPHAN_FILTER) count, _ = orphans.delete() total += count level = "INFO" if total else "DEBUG" self.log.log(level, f"Deleted {total} orphan settings rows.") finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/janitor/failed_imports.py ================================================ """Force update events for failed imports.""" from codex.librarian.fs.events import FSChange, FSEvent from codex.librarian.fs.tasks import FSEventTask from codex.librarian.scribe.janitor.vacuum import JanitorVacuum from codex.models import FailedImport, Library class JanitorUpdateFailedImports(JanitorVacuum): """Methods for updating failed imports.""" def _force_update_failed_imports(self, library_id) -> None: """Force update events for failed imports in a library.""" failed_import_paths = FailedImport.objects.filter( library=library_id ).values_list("path", flat=True) for path in failed_import_paths: event = FSEvent(src_path=path, change=FSChange.modified) task = FSEventTask(library_id, event) self.librarian_queue.put(task) def force_update_all_failed_imports(self) -> None: """Force update events for failed imports in every library.""" pks = Library.objects.filter(covers_only=False).values_list("pk", 
flat=True) for pk in pks: self._force_update_failed_imports(pk) ================================================ FILE: codex/librarian/scribe/janitor/integrity/__init__.py ================================================ """Database integrity checks and remedies.""" # Uses app.get_model() because functions may also be called before the models are ready on startup. from django.db import DEFAULT_DB_ALIAS, connections from codex.librarian.scribe.janitor.integrity.foreign_keys import fix_foreign_keys from codex.librarian.scribe.janitor.status import ( JanitorDBFKIntegrityStatus, JanitorDBFTSIntegrityStatus, JanitorDBFTSRebuildStatus, JanitorDBIntegrityStatus, ) from codex.librarian.scribe.janitor.tasks import JanitorFTSRebuildTask from codex.librarian.worker import WorkerStatusAbortableBase _FTS_INSERT_TMPL = "INSERT INTO codex_comicfts (codex_comicfts) VALUES('%s');" _PRAGMA_TMPL = "PRAGMA %s;" def _exec_sql(sql): """Run SQL on a potentially unready database.""" connection = connections[DEFAULT_DB_ALIAS] connection.prepare_database() with connection.cursor() as cursor: cursor.execute("PRAGMA wal_checkpoint(TRUNCATE)") cursor.execute(sql) return cursor.fetchall() def _is_integrity_ok(results) -> bool: return ( results and len(results) == 1 and len(results[0]) == 1 and results[0][0] == "ok" ) def integrity_check(log, *, long: bool) -> None: """Run sqlite3 integrity check.""" pragma = "integrity_check" if long else "quick_check" sql = _PRAGMA_TMPL % pragma log.debug(f"Running database '{sql}'...") results = _exec_sql(sql) if _is_integrity_ok(results): length = "" if long else "quick " log.success(f"Database passed {length}integrity check.") else: log.warning(f"Database '{sql}' returned results:") log.warning(results) log.warning( "See the README for database rebuild instructions if the above warning looks severe." ) def fts_rebuild() -> None: """FTS Rebuild.""" sql = _FTS_INSERT_TMPL % "rebuild" _exec_sql(sql) def fts_integrity_check(log) -> bool: """Run sqlite3 fts integrity check.""" sql = _FTS_INSERT_TMPL % "integrity-check" success = False results = [] try: results = _exec_sql(sql) if results: # I'm not sure if this raises or puts the error in the results.
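# Rows returned here indicate a corrupt index either way; raise so both failure modes hit the same handler below.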
raise ValueError(results) # noqa: TRY301 log.success("Full Text Search Index passed integrity check.") success = True except Exception: log.exception("Full Text Search Index failed integrity check") log.debug(results) return success class JanitorIntegrity(WorkerStatusAbortableBase): """Integrity Check Mixin.""" def foreign_key_check(self) -> None: """Foreign Key Check task.""" status = JanitorDBFKIntegrityStatus() try: self.status_controller.start(status) with self.db_write_lock: fix_foreign_keys(self.log) finally: self.status_controller.finish(status) def integrity_check(self, *, long: bool) -> None: """Integrity check task.""" subtitle = "" if long else "Quick" status = JanitorDBIntegrityStatus(subtitle=subtitle) try: self.status_controller.start(status) with self.db_write_lock: integrity_check(self.log, long=long) finally: self.status_controller.finish(status) def fts_rebuild(self) -> None: """FTS rebuild task.""" status = JanitorDBFTSRebuildStatus() try: self.status_controller.start(status) with self.db_write_lock: fts_rebuild() finally: self.status_controller.finish(status) def fts_integrity_check(self) -> None: """FTS integrity check task.""" status = JanitorDBFTSIntegrityStatus() try: self.status_controller.start(status) with self.db_write_lock: success = fts_integrity_check(self.log) if not success: self.librarian_queue.put(JanitorFTSRebuildTask()) finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/janitor/integrity/foreign_keys.py ================================================ """Database integrity checks and remedies.""" # Uses app.get_model() because functions may also be called before the models are ready on startup. from typing import TYPE_CHECKING from django.apps import apps from django.db import DEFAULT_DB_ALIAS, connections from django.db.models.functions import Now if TYPE_CHECKING: from django.db.models.manager import BaseManager from codex.models.comic import Comic def _get_fk_column_name(cursor, table_name: str, fkid: int) -> str | None: """Resolve fkid to the child column name via PRAGMA foreign_key_list.""" cursor.execute(f'PRAGMA foreign_key_list("{table_name}")') # Columns: id, seq, table, from, to, on_update, on_delete, match for row in cursor.fetchall(): if row[0] == fkid and row[1] == 0: return row[3] # 'from' = child column name return None def _is_column_nullable(cursor, table_name: str, column_name: str) -> bool: """Check if a column allows NULL via PRAGMA table_info.""" cursor.execute(f'PRAGMA table_info("{table_name}")') # Columns: cid, name, type, notnull, dflt_value, pk for row in cursor.fetchall(): if row[1] == column_name: return row[3] == 0 # notnull=0 means nullable return False def _collect_comic_ids_for_table(cursor, table_name: str, rowids: set) -> set: """Collect comic PKs that need re-indexing after FK fixes.""" if table_name == "codex_comic": # rowid == pk for standard Django integer-PK models. return set(rowids) # For m2m through tables or other tables with a comic_id column. 
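# PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk), so the column name is row[1].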
try: cursor.execute(f'PRAGMA table_info("{table_name}")') has_comic_id = any(row[1] == "comic_id" for row in cursor.fetchall()) except Exception: return set() if not has_comic_id: return set() placeholders = ",".join(["%s"] * len(rowids)) cursor.execute( f'SELECT comic_id FROM "{table_name}" WHERE rowid IN ({placeholders})', # noqa: S608 sorted(rowids), ) return {row[0] for row in cursor.fetchall() if row[0] is not None} def _mark_comics_for_update(fix_comic_pks, log) -> None: """Mark comics with altered foreign keys for update.""" if not fix_comic_pks: return comic_model: type[Comic] = apps.get_model(app_label="codex", model_name="comic") # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] outdated_comics: BaseManager[Comic] = comic_model.objects.filter( pk__in=fix_comic_pks ).only("stat", "updated_at") if not outdated_comics: return update_comics = [] now = Now() for comic in outdated_comics: stat_list = comic.stat if not stat_list: continue stat_list[8] = 0.0 comic.stat = stat_list # pyright: ignore[reportAttributeAccessIssue] comic.updated_at = now update_comics.append(comic) if update_comics: count = comic_model.objects.bulk_update( update_comics, fields=["stat", "updated_at"] ) log.info(f"Marked {count} comics with bad relations for update by poller.") def _group_fk_violations( cursor, results, log ) -> dict[str, dict[int, list[tuple[str, bool]]]]: """ Resolve raw PRAGMA foreign_key_check rows into grouped violations. Returns {table: {rowid: [(column_name, nullable), ...]}}. """ col_info_cache: dict[tuple[str, int], tuple[str | None, bool]] = {} violations: dict[str, dict[int, list[tuple[str, bool]]]] = {} for table_name, rowid, _parent, fkid in results: cache_key = (table_name, fkid) if cache_key not in col_info_cache: fk_col = _get_fk_column_name(cursor, table_name, fkid) nullable = ( _is_column_nullable(cursor, table_name, fk_col) if fk_col else False ) col_info_cache[cache_key] = (fk_col, nullable) fk_col, nullable = col_info_cache[cache_key] if not fk_col: log.warning( f"Could not resolve FK column for {table_name} fkid={fkid}, skipping" ) continue violations.setdefault(table_name, {}).setdefault(rowid, []).append( (fk_col, nullable) ) return violations def _fix_fk_violations( cursor, violations: dict[str, dict[int, list[tuple[str, bool]]]] ) -> tuple[int, int, set[int]]: """ Null or delete rows with broken foreign keys. For each violation: - If all bad FK columns on the row are nullable: NULL them. - Otherwise: DELETE the row. Returns (nulled_count, deleted_count, fix_comic_pks). """ nulled = 0 deleted = 0 fix_comic_pks: set[int] = set() for table_name, rows in violations.items(): # Collect comic_ids before we delete any rows. fix_comic_pks |= _collect_comic_ids_for_table( cursor, table_name, set(rows.keys()) ) for rowid, fk_cols in rows.items(): all_nullable = all(nullable for _, nullable in fk_cols) if all_nullable: set_clauses = ", ".join(f'"{col}" = NULL' for col, _ in fk_cols) cursor.execute( f'UPDATE "{table_name}" SET {set_clauses} WHERE rowid = %s', # noqa: S608 [rowid], ) nulled += cursor.rowcount else: cursor.execute( f'DELETE FROM "{table_name}" WHERE rowid = %s', # noqa: S608 [rowid], ) deleted += cursor.rowcount return nulled, deleted, fix_comic_pks def fix_foreign_keys(log) -> None: """ Fix all foreign key violations using raw SQL. Uses PRAGMA foreign_key_check to find violations, then for each bad row nulls the FK column if nullable, or deletes the row if not. 
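Afterwards, affected comics are marked stale so the poller re-imports them.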
Operates entirely via raw SQL and rowid so it works for all tables including third-party ones (e.g. authtoken_token) without requiring ORM model resolution. """ connection = connections[DEFAULT_DB_ALIAS] connection.prepare_database() try: with connection.cursor() as cursor: cursor.execute("PRAGMA wal_checkpoint(TRUNCATE)") cursor.execute("PRAGMA foreign_key_check") results = cursor.fetchall() if not results: log.success("Database passed foreign key check.") return log.warning( f"Found {len(results)} foreign key violations. Attempting fix..." ) log.debug(results) violations = _group_fk_violations(cursor, results, log) nulled, deleted, fix_comic_pks = _fix_fk_violations(cursor, violations) if nulled: log.info( f"Nulled bad foreign keys on {nulled} rows across {len(violations)} tables." ) if deleted: log.info( f"Deleted {deleted} rows with non-nullable broken foreign keys." ) if not nulled and not deleted: log.success("Database passed foreign key check.") return _mark_comics_for_update(fix_comic_pks, log) except Exception: log.exception("Integrity: foreign_key_check") ================================================ FILE: codex/librarian/scribe/janitor/janitor.py ================================================ """Janitor task runner.""" from codex.librarian.bookmark.tasks import CodexLatestVersionTask from codex.librarian.covers.status import FindOrphanCoversStatus, RemoveCoversStatus from codex.librarian.covers.tasks import CoverRemoveOrphansTask from codex.librarian.scribe.importer.statii.moved import ImporterMoveFoldersStatus from codex.librarian.scribe.janitor.status import ( JanitorAdoptOrphanFoldersStatus, JanitorCleanupBookmarksStatus, JanitorCleanupCoversStatus, JanitorCleanupSessionsStatus, JanitorCleanupSettingsStatus, JanitorCleanupTagsStatus, JanitorCodexLatestVersionStatus, JanitorDBBackupStatus, JanitorDBFKIntegrityStatus, JanitorDBFTSIntegrityStatus, JanitorDBIntegrityStatus, JanitorDBOptimizeStatus, ) from codex.librarian.scribe.janitor.tasks import ( JanitorAdoptOrphanFoldersTask, JanitorBackupTask, JanitorCleanCoversTask, JanitorCleanFKsTask, JanitorCleanupBookmarksTask, JanitorCleanupSessionsTask, JanitorCleanupSettingsTask, JanitorCodexUpdateTask, JanitorForeignKeyCheckTask, JanitorFTSIntegrityCheckTask, JanitorFTSRebuildTask, JanitorImportForceAllFailedTask, JanitorIntegrityCheckTask, JanitorNightlyTask, JanitorVacuumTask, ) from codex.librarian.scribe.janitor.update import JanitorCodexUpdate from codex.librarian.scribe.search.status import ( SearchIndexCleanStatus, SearchIndexOptimizeStatus, SearchIndexSyncCreateStatus, SearchIndexSyncUpdateStatus, ) from codex.librarian.scribe.search.tasks import ( SearchIndexOptimizeTask, SearchIndexSyncTask, ) from codex.librarian.tasks import LibrarianTask from codex.models import Timestamp _JANITOR_STATII = ( JanitorCodexLatestVersionStatus, JanitorAdoptOrphanFoldersStatus, ImporterMoveFoldersStatus, JanitorDBFKIntegrityStatus, JanitorDBIntegrityStatus, JanitorDBFTSIntegrityStatus, JanitorCleanupTagsStatus, JanitorCleanupCoversStatus, JanitorCleanupSessionsStatus, JanitorCleanupBookmarksStatus, JanitorCleanupSettingsStatus, SearchIndexCleanStatus, SearchIndexSyncUpdateStatus, SearchIndexSyncCreateStatus, SearchIndexOptimizeStatus, JanitorDBOptimizeStatus, JanitorDBBackupStatus, FindOrphanCoversStatus, RemoveCoversStatus, ) _NIGHTLY_TASK_CLASSES: tuple[type[LibrarianTask], ...] 
= ( CodexLatestVersionTask, JanitorAdoptOrphanFoldersTask, JanitorForeignKeyCheckTask, JanitorIntegrityCheckTask, JanitorFTSIntegrityCheckTask, JanitorCleanFKsTask, JanitorCleanCoversTask, JanitorCleanupSessionsTask, JanitorCleanupBookmarksTask, JanitorCleanupSettingsTask, SearchIndexSyncTask, SearchIndexOptimizeTask, JanitorVacuumTask, JanitorBackupTask, CoverRemoveOrphansTask, ) _JANITOR_METHOD_MAP: dict[type, str] = { JanitorVacuumTask: "vacuum_db", JanitorCleanFKsTask: "cleanup_fks", JanitorCleanCoversTask: "cleanup_custom_covers", JanitorCleanupSessionsTask: "cleanup_sessions", JanitorCleanupBookmarksTask: "cleanup_orphan_bookmarks", JanitorCleanupSettingsTask: "cleanup_orphan_settings", JanitorImportForceAllFailedTask: "force_update_all_failed_imports", JanitorForeignKeyCheckTask: "foreign_key_check", JanitorFTSIntegrityCheckTask: "fts_integrity_check", JanitorFTSRebuildTask: "fts_rebuild", JanitorNightlyTask: "queue_nightly_tasks", } class Janitor(JanitorCodexUpdate): """Janitor inline task runner.""" def queue_nightly_tasks(self) -> None: """Queue all the janitor tasks.""" try: self.status_controller.start_many(_JANITOR_STATII) for task_class in _NIGHTLY_TASK_CLASSES: self.librarian_queue.put(task_class()) Timestamp.touch(Timestamp.Choices.JANITOR) except Exception: self.log.exception(f"In {self.__class__.__name__}") def handle_task(self, task) -> None: """Run Janitor tasks as the librarian process directly.""" try: # Simple task dispatch if method_name := _JANITOR_METHOD_MAP.get(type(task)): method = getattr(self, method_name) method() return # Tasks with special parameters match task: case JanitorBackupTask(): self.backup_db(show_status=True) case JanitorIntegrityCheckTask(): self.integrity_check(long=task.long) case JanitorCodexUpdateTask(): self.update_codex(force=task.force) case _: self.log.warning(f"Janitor received unknown task {task}") except Exception: self.log.exception("Janitor task crashed.") ================================================ FILE: codex/librarian/scribe/janitor/scheduled_time.py ================================================ """Janitor Scheduled time.""" from datetime import datetime, time, timedelta from django.utils import timezone as django_timezone from loguru._logger import Logger def get_janitor_time(_log: Logger) -> datetime: """Get midnight relative to now.""" tomorrow = django_timezone.now() + timedelta(days=1) tomorrow = tomorrow.astimezone() return datetime.combine(tomorrow, time.min).astimezone() ================================================ FILE: codex/librarian/scribe/janitor/status.py ================================================ """Janitor Statii.""" from abc import ABC from codex.librarian.scribe.status import ScribeStatus class JanitorStatus(ScribeStatus, ABC): """Janitor Statii.""" class JanitorAdoptOrphanFoldersStatus(JanitorStatus): """Janitor Adopt Orphan Folders Status.""" CODE = "JAF" VERB = "Adopt" _verbed = "Adopted" ITEM_NAME = "orphan folders" class JanitorCleanupTagsStatus(JanitorStatus): """Janitor Cleanup Tags Status.""" CODE = "JCT" VERB = "Cleanup" _verbed = "Cleaned up" ITEM_NAME = "orphan tags" class JanitorCodexLatestVersionStatus(JanitorStatus): """Janitor Codex Latest Version Status.""" CODE = "JLV" VERB = "Check" _verbed = "Checked" ITEM_NAME = "Codex latest version" SINGLE = True class JanitorCodexUpdateStatus(JanitorStatus): """Janitor Update Codex Software.""" CODE = "JCU" VERB = "Update" ITEM_NAME = "Codex server software" SINGLE = True log_success = True class JanitorDBOptimizeStatus(JanitorStatus):
"""Janitor DB Optimize.""" CODE = "JDO" VERB = "Optimize" ITEM_NAME = "database" SINGLE = True log_success = True class JanitorDBBackupStatus(JanitorStatus): """Janitor DB Backup.""" CODE = "JDB" VERB = "Backup" _verbed = "Backed up" ITEM_NAME = "database" SINGLE = True class JanitorCleanupSessionsStatus(JanitorStatus): """Janitor Cleanup Sessions Status.""" CODE = "JRS" VERB = "Cleanup" _verbed = "Cleaned up" ITEM_NAME = "old sessions" class JanitorCleanupCoversStatus(JanitorStatus): """Janitor Cleanup Covers Status.""" CODE = "JRV" VERB = "Cleanup" _verbed = "Cleaned up" ITEM_NAME = "orphan covers" class JanitorCleanupBookmarksStatus(JanitorStatus): """Janitor Cleanup Bookmarks Status.""" CODE = "JRB" VERB = "Cleanup" _verbed = "Cleaned up" ITEM_NAME = "orphan bookmarks" class JanitorCleanupSettingsStatus(JanitorStatus): """Janitor Cleanup Settings Status.""" CODE = "JAS" VERB = "Cleanup" _verbed = "Cleaned up" ITEM_NAME = "orphan settings" class JanitorDBFKIntegrityStatus(JanitorStatus): """Janitor Check DB FK Integrity Status.""" CODE = "JIF" VERB = "Check" _verbed = "Checked" ITEM_NAME = "integrtity of database foreign keys" SINGLE = True class JanitorDBIntegrityStatus(JanitorStatus): """Janitor Check DB Integrity Status.""" CODE = "JID" VERB = "Check" _verbed = "Checked" ITEM_NAME = "integrity of entire database" SINGLE = True class JanitorDBFTSIntegrityStatus(JanitorStatus): """Janitor Check DB FTS Integrity Status.""" CODE = "JIS" VERB = "Check" _verbed = "Checked" ITEM_NAME = "integrity of full text virtual table" SINGLE = True class JanitorDBFTSRebuildStatus(JanitorStatus): """Janitor Rebuild DB FTS Status.""" CODE = "JSR" VERB = "Rebuild" _verbed = "Rebuilt" ITEM_NAME = "full text search virtual table" SINGLE = True log_success = True JANITOR_STATII = ( JanitorAdoptOrphanFoldersStatus, JanitorCleanupTagsStatus, JanitorCodexUpdateStatus, JanitorCodexLatestVersionStatus, JanitorDBOptimizeStatus, JanitorDBBackupStatus, JanitorCleanupSessionsStatus, JanitorCleanupCoversStatus, JanitorCleanupBookmarksStatus, JanitorCleanupSettingsStatus, JanitorDBFKIntegrityStatus, JanitorDBIntegrityStatus, JanitorDBFTSIntegrityStatus, JanitorDBFTSRebuildStatus, ) ================================================ FILE: codex/librarian/scribe/janitor/tasks.py ================================================ """Janitor Tasks.""" from dataclasses import dataclass from codex.librarian.scribe.tasks import ScribeTask class JanitorTask(ScribeTask): """Tasks for the janitor.""" @dataclass class JanitorCodexUpdateTask(JanitorTask): """Task for updater.""" force: bool = False class JanitorAdoptOrphanFoldersTask(JanitorTask): """Move orphaned folders into a correct tree position.""" class JanitorBackupTask(JanitorTask): """Backup the database.""" class JanitorVacuumTask(JanitorTask): """Vacuum the database.""" class JanitorCleanFKsTask(JanitorTask): """Clean unused foreign keys.""" class JanitorCleanCoversTask(JanitorTask): """Clean unused custom covers.""" class JanitorCleanupSessionsTask(JanitorTask): """Cleanup Session table.""" class JanitorCleanupBookmarksTask(JanitorTask): """Clean unused bookmarks.""" class JanitorCleanupSettingsTask(JanitorTask): """Clean orphan settings rows.""" class JanitorForeignKeyCheckTask(JanitorTask): """Check and repair foreign keys integrity.""" class JanitorImportForceAllFailedTask(JanitorTask): """Force update for failed imports in every library.""" @dataclass class JanitorIntegrityCheckTask(JanitorTask): """Check integrity and warn.""" long: bool = True class 
JanitorFTSIntegrityCheckTask(JanitorTask): """Check fts integrity.""" class JanitorFTSRebuildTask(JanitorTask): """Rebuild fts table in place.""" class JanitorNightlyTask(JanitorTask): """Submit all janitor nightly tasks to the queue.""" ================================================ FILE: codex/librarian/scribe/janitor/update.py ================================================ """Codex auto update.""" import subprocess import sys from packaging.version import Version from codex.choices.admin import AdminFlagChoices from codex.librarian.restarter.tasks import CodexRestartTask from codex.librarian.scribe.janitor.cleanup import JanitorCleanup from codex.librarian.scribe.janitor.status import JanitorCodexUpdateStatus from codex.models import AdminFlag from codex.models.admin import Timestamp from codex.version import VERSION, get_version class JanitorCodexUpdate(JanitorCleanup): """Auto Update codex methods for janitor.""" def _is_outdated(self) -> bool: """Return whether codex is outdated.""" result = False if not VERSION: self.log.warning("Cannot determine installed Codex version.") return result ts = Timestamp.objects.get(key=Timestamp.Choices.CODEX_VERSION.value) latest_version = ts.version packaging_latest_version = Version(latest_version) installed_packaging_version = Version(VERSION) if ( packaging_latest_version.is_prerelease and not installed_packaging_version.is_prerelease ): pre_blurb = "; latest version is a prerelease but installed version is not" else: result = packaging_latest_version > installed_packaging_version pre_blurb = "" self.log.debug(f"{latest_version=} > {VERSION=} = {result}{pre_blurb}") return result def _update_codex(self, *, force: bool) -> None: if force: self.log.info("Forcing update of Codex.") else: eau = AdminFlag.objects.only("on").get( key=AdminFlagChoices.AUTO_UPDATE.value ) if not eau.on or not self._is_outdated(): self.log.info("Codex is up to date.") return self.log.info("Codex seems outdated. Trying to update.") args = ( sys.executable, "-m", "pip", "install", "--upgrade", "codex", ) subprocess.run( # noqa: S603 args, check=True, ) def update_codex(self, *, force: bool) -> None: """Update the package and restart everything if the version changed.""" status = JanitorCodexUpdateStatus() try: self.status_controller.start(status) self._update_codex(force=force) except Exception: self.log.exception("Updating Codex software") finally: self.status_controller.finish(status) # Restart if changed version. new_version = get_version() restart = new_version != VERSION if restart: self.log.success(f"Codex was updated from {VERSION} to {new_version}.") self.librarian_queue.put(CodexRestartTask()) else: reason = ( "Codex updated to the same version that was previously" f" installed: {VERSION}."
) self.log.info(reason) ================================================ FILE: codex/librarian/scribe/janitor/vacuum.py ================================================ """Vacuum the database.""" from django.db import connection from humanize import naturalsize from codex.librarian.scribe.janitor.integrity import JanitorIntegrity from codex.librarian.scribe.janitor.status import ( JanitorDBBackupStatus, JanitorDBOptimizeStatus, ) from codex.settings import BACKUP_DB_DIR, BACKUP_DB_PATH, DB_PATH _OLD_BACKUP_PATH = BACKUP_DB_PATH.with_suffix(BACKUP_DB_PATH.suffix + ".old") class JanitorVacuum(JanitorIntegrity): """Vacuum methods for janitor.""" def vacuum_db(self) -> None: """Vacuum the database and report on savings.""" status = JanitorDBOptimizeStatus() try: self.status_controller.start(status) old_size = DB_PATH.stat().st_size with connection.cursor() as cursor: cursor.execute("PRAGMA optimize") cursor.execute("VACUUM") cursor.execute("PRAGMA wal_checkpoint(TRUNCATE)") new_size = DB_PATH.stat().st_size saved = naturalsize(old_size - new_size) self.log.info(f"Vacuumed database. Saved {saved}.") finally: self.status_controller.finish(status) def backup_db(self, backup_path=BACKUP_DB_PATH, *, show_status: bool) -> None: """Backup the database.""" status = JanitorDBBackupStatus() if show_status else None try: if status: self.status_controller.start(status) BACKUP_DB_DIR.mkdir(exist_ok=True, parents=True) if backup_path.is_file(): backup_path.replace(_OLD_BACKUP_PATH) path = str(backup_path) with connection.cursor() as cursor: cursor.execute(f"VACUUM INTO {path!r}") _OLD_BACKUP_PATH.unlink(missing_ok=True) self.log.info(f"Backed up database to {path}") except Exception: self.log.exception("Backing up database.") finally: if status: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/lazy_importer.py ================================================ """Kick off an import task for one batch of books.""" from codex.choices.admin import AdminFlagChoices from codex.librarian.scribe.importer.tasks import ImportTask from codex.librarian.worker import WorkerBase from codex.models.admin import AdminFlag from codex.models.comic import Comic class LazyImporter(WorkerBase): """Kick off an import task for just these books.""" def lazy_import(self, task) -> None: """Kick off an import task for just these books.""" if not AdminFlag.objects.get( key=AdminFlagChoices.LAZY_IMPORT_METADATA.value ).on: self.log.debug("Lazy Import disabled by flag.") return if task.group == "c": comics = Comic.objects.filter( pk__in=task.pks, metadata_mtime__isnull=True ).only("path", "library_id") elif task.group == "f": comics = Comic.objects.filter( parent_folder__in=task.pks, metadata_mtime__isnull=True ).only("path", "library_id") else: self.log.warning(f"No lazy import enabled for group {task}") return # Map comics to libraries. library_path_map = {} for comic in comics: library_id = comic.library_id # pyright: ignore[reportAttributeAccessIssue] if library_id not in library_path_map: library_path_map[library_id] = set() library_path_map[library_id].add(comic.path) for library_id, paths in library_path_map.items(): # An abridged import task.
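# Only library_id, files_modified, and force_import_metadata are set below; the importer treats these paths as freshly modified files and re-reads their metadata. The remaining ImportTask fields presumably keep their defaults.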
if files_modified := frozenset(paths): task = ImportTask( library_id=library_id, files_modified=files_modified, force_import_metadata=True, ) self.librarian_queue.put(task) ================================================ FILE: codex/librarian/scribe/priority.py ================================================ """Priority for Scribe tasks in the PriorityQueue.""" from datetime import UTC, datetime from codex.librarian.scribe.importer.tasks import ( ImportTask, ) from codex.librarian.scribe.janitor.tasks import ( JanitorAdoptOrphanFoldersTask, JanitorBackupTask, JanitorCleanCoversTask, JanitorCleanFKsTask, JanitorCleanupBookmarksTask, JanitorCleanupSessionsTask, JanitorCleanupSettingsTask, JanitorCodexUpdateTask, JanitorForeignKeyCheckTask, JanitorFTSIntegrityCheckTask, JanitorFTSRebuildTask, JanitorImportForceAllFailedTask, JanitorIntegrityCheckTask, JanitorNightlyTask, JanitorVacuumTask, ) from codex.librarian.scribe.search.tasks import ( SearchIndexCleanStaleTask, SearchIndexClearTask, SearchIndexOptimizeTask, SearchIndexSyncTask, ) from codex.librarian.scribe.tasks import ( ImportAbortTask, LazyImportComicsTask, ScribeTask, SearchIndexSyncAbortTask, UpdateGroupsTask, ) _SCRIBE_TASK_PRIORITY = ( ImportAbortTask, SearchIndexSyncAbortTask, JanitorNightlyTask, JanitorCodexUpdateTask, JanitorAdoptOrphanFoldersTask, JanitorForeignKeyCheckTask, JanitorIntegrityCheckTask, JanitorFTSIntegrityCheckTask, JanitorFTSRebuildTask, JanitorImportForceAllFailedTask, ImportTask, LazyImportComicsTask, UpdateGroupsTask, JanitorCleanFKsTask, JanitorCleanCoversTask, JanitorCleanupSessionsTask, JanitorCleanupSettingsTask, JanitorCleanupBookmarksTask, SearchIndexClearTask, SearchIndexCleanStaleTask, SearchIndexSyncTask, SearchIndexOptimizeTask, JanitorVacuumTask, JanitorBackupTask, ) def get_task_priority(task: ScribeTask) -> tuple[int, float]: """Get task priority by index.""" now = datetime.now(tz=UTC).timestamp() priority = _SCRIBE_TASK_PRIORITY.index(type(task)) return priority, now ================================================ FILE: codex/librarian/scribe/scribed.py ================================================ """Bulk import and move comics and folders.""" from multiprocessing import Manager from queue import PriorityQueue from typing import override from codex.librarian.scribe.importer.importer import ComicImporter from codex.librarian.scribe.importer.tasks import ImportTask from codex.librarian.scribe.janitor.adopt_folders import OrphanFolderAdopter from codex.librarian.scribe.janitor.janitor import Janitor from codex.librarian.scribe.janitor.tasks import ( JanitorAdoptOrphanFoldersTask, JanitorFTSRebuildTask, JanitorTask, ) from codex.librarian.scribe.lazy_importer import LazyImporter from codex.librarian.scribe.priority import get_task_priority from codex.librarian.scribe.search.handler import SearchIndexer from codex.librarian.scribe.search.tasks import ( SearchIndexClearTask, SearchIndexerTask, ) from codex.librarian.scribe.tasks import ( CleanupAbortTask, ImportAbortTask, LazyImportComicsTask, SearchIndexSyncAbortTask, UpdateGroupsTask, ) from codex.librarian.scribe.timestamp_update import TimestampUpdater from codex.librarian.threads import QueuedThread ABORT_SEARCH_UPDATE_TASKS = ( SearchIndexClearTask, SearchIndexSyncAbortTask, JanitorFTSRebuildTask, ) class ScribeThread(QueuedThread): """A worker to handle all bulk database updates.""" SHUTDOWN_MSG = (0, QueuedThread.SHUTDOWN_MSG) def __init__(self, *args, **kwargs) -> None: """Initialize abort event.""" self.abort_import_event = 
Manager().Event() self.abort_search_update_event = Manager().Event() self.abort_cleanup_event = Manager().Event() super().__init__(*args, queue=PriorityQueue(), **kwargs) @override def process_item(self, item) -> None: """Run the updater.""" task = item[-1] match task: case ImportTask(): importer = ComicImporter( task, self.log, self.librarian_queue, self.db_write_lock, self.abort_import_event, ) importer.apply() case LazyImportComicsTask(): worker = LazyImporter( self.log, self.librarian_queue, self.db_write_lock ) worker.lazy_import(task) case UpdateGroupsTask(): worker = TimestampUpdater( self.log, self.librarian_queue, self.db_write_lock ) worker.update_groups(task) case JanitorAdoptOrphanFoldersTask(): worker = OrphanFolderAdopter( self.log, self.librarian_queue, self.db_write_lock, event=self.abort_import_event, ) worker.adopt_orphan_folders() case SearchIndexerTask(): worker = SearchIndexer( self.log, self.librarian_queue, self.db_write_lock, event=self.abort_search_update_event, ) worker.handle_task(task) case JanitorTask(): worker = Janitor( self.log, self.librarian_queue, self.db_write_lock, event=self.abort_cleanup_event, ) worker.handle_task(task) case _: self.log.warning(f"Bad task sent to scribe: {task}") def put(self, task) -> None: """Put item in queue, and signal events.""" if isinstance(task, ABORT_SEARCH_UPDATE_TASKS): self.abort_search_update_event.set() if isinstance(task, ImportTask | JanitorAdoptOrphanFoldersTask): self.abort_cleanup_event.set() self.log.debug("Abort cleanup db signal given.") elif isinstance(task, SearchIndexSyncAbortTask): self.log.debug( "Search Index Sync abort signal given. It may take a while for the current import chunk to finish." ) return elif isinstance(task, ImportAbortTask): self.abort_import_event.set() self.log.debug("Import abort signal given.") return elif isinstance(task, CleanupAbortTask): self.abort_cleanup_event.set() self.log.debug("Cleanup abort signal given.") return priority = get_task_priority(task) item = (priority, task) self.queue.put(item) ================================================ FILE: codex/librarian/scribe/search/__init__.py ================================================ """Search Indexer.""" ================================================ FILE: codex/librarian/scribe/search/const.py ================================================ """FTS constants.""" _COMICFTS_ATTRIBUTES = ( "collection_title", "name", "review", "summary", "updated_at", ) _COMICFTS_FKS = ( "publisher", "imprint", "series", "age_rating", "country", "language", "original_format", "scan_info", "tagger", ) _COMICFTS_M2MS = ( "characters", "credits", "genres", "locations", "series_groups", "sources", "stories", "story_arcs", "tags", "teams", "universes", ) COMICFTS_UPDATE_FIELDS = ( *_COMICFTS_ATTRIBUTES, *_COMICFTS_FKS, *_COMICFTS_M2MS, ) ================================================ FILE: codex/librarian/scribe/search/handler.py ================================================ """Handle search indexer tasks.""" from codex.librarian.scribe.search.sync import SearchIndexerSync from codex.librarian.scribe.search.tasks import ( SearchIndexCleanStaleTask, SearchIndexClearTask, SearchIndexerTask, SearchIndexOptimizeTask, SearchIndexSyncTask, ) class SearchIndexer(SearchIndexerSync): """Handle search indexer tasks.""" def handle_task(self, task: SearchIndexerTask) -> None: """Handle search indexer tasks.""" match task: case SearchIndexSyncTask(): self.update_search_index(rebuild=task.rebuild) case SearchIndexCleanStaleTask(): 
self.remove_stale_records() case SearchIndexOptimizeTask(): self.optimize() case SearchIndexClearTask(): self.clear_search_index() case _: self.log.warning(f"Bad task sent to scribe {task}") ================================================ FILE: codex/librarian/scribe/search/optimize.py ================================================ """Search Index optimize.""" from django.db import connection from codex.librarian.scribe.search.status import SearchIndexOptimizeStatus from codex.librarian.worker import ( WorkerStatusAbortableBase, ) _TABLE = "codex_comicfts" _OPTIMIZE_SQL = f"INSERT INTO {_TABLE}({_TABLE}) VALUES('optimize')" class SearchIndexerOptimize(WorkerStatusAbortableBase): """Search Index optimize methods.""" def optimize(self) -> None: """Optimize the search index, trapping exceptions.""" status = SearchIndexOptimizeStatus() try: self.status_controller.start(status) with connection.cursor() as cursor: cursor.execute(_OPTIMIZE_SQL) except Exception: self.log.exception("Optimizing search index:") finally: self.status_controller.finish(status) ================================================ FILE: codex/librarian/scribe/search/prepare.py ================================================ """Prepare ComicFTS objects.""" from contextlib import suppress from types import MappingProxyType from comicbox.enums.comicbox import IdSources from comicbox.enums.maps.identifiers import ID_SOURCE_NAME_MAP from django.db.models.functions.datetime import Now from codex.librarian.status import Status from codex.models.comic import ComicFTS from codex.serializers.fields.browser import CountryField, LanguageField _PYCOUNTRY_FIELDS = MappingProxyType( {"country": CountryField(), "language": LanguageField()} ) _COMIC_KEYS = ( "collection_title", "country", "language", "name", "review", "summary", "fts_publisher", "fts_imprint", "fts_series", "fts_age_rating", "fts_original_format", "fts_scan_info", "fts_tagger", "fts_characters", "fts_credits", "fts_country", "fts_genres", "fts_sources", "fts_language", "fts_locations", "fts_series_groups", "fts_stories", "fts_story_arcs", "fts_tags", "fts_teams", "fts_universes", ) class SearchEntryPrepare: """Prepare ComicFTS objects.""" @staticmethod def _get_entry_str_value(entry: dict, key: str) -> str: value = entry.get(key) if not value: return "" if isinstance(value, tuple): value = value[0] if not value: return "" return value @staticmethod def _get_sources_fts_field(entry: dict) -> str: sources = entry.get("sources", ()) if not sources: return "" if isinstance(sources, str): sources = (sources,) names = set() for source_str in sources: if not source_str: continue names.add(source_str) with suppress(ValueError): id_source = IdSources(source_str) if long_name := ID_SOURCE_NAME_MAP.get(id_source): names.add(long_name) return ",".join(sorted(names)) @classmethod def _get_pycountry_fts_field(cls, entry, field_name) -> str: iso_code = cls._get_entry_str_value(entry, field_name) if not iso_code: return "" field = _PYCOUNTRY_FIELDS[field_name] return ",".join((iso_code, field.to_representation(iso_code))) @classmethod def _create_comicfts_entry_attributes(cls, entry, *, create: bool) -> None: now = Now() entry["updated_at"] = now if create: entry["created_at"] = now @classmethod def _create_comicfts_entry_fks(cls, entry) -> None: entry["country"] = cls._get_pycountry_fts_field(entry, "country") entry["language"] = cls._get_pycountry_fts_field(entry, "language") @classmethod def _create_comicfts_entry_m2ms(cls, entry, existing_values: dict | None)
-> None: if sources := cls._get_sources_fts_field(entry): entry["sources"] = sources if existing_values: for field_name in tuple(existing_values.keys()): if values := existing_values.get(field_name): entry[field_name] = entry.get(field_name, ()) + values @classmethod def prepare_import_fts_entry( cls, comic_id: int, entry: dict, existing_m2m_values: dict | None, comicfts: ComicFTS | None, obj_list: list[ComicFTS], status: Status, *, create: bool, ) -> None: """Prepare ComicFTS object from import data.""" cls._create_comicfts_entry_m2ms(entry, existing_m2m_values) cls._create_comicfts_entry_fks(entry) for field_name in entry: value = entry[field_name] if isinstance(value, tuple): entry[field_name] = ",".join(sorted(value)) cls._create_comicfts_entry_attributes(entry, create=create) if comicfts: for field_name, value in entry.items(): setattr(comicfts, field_name, value) else: entry["comic_id"] = comic_id comicfts = ComicFTS(**entry) obj_list.append(comicfts) status.increment_complete() @classmethod def prepare_sync_fts_entry( cls, comic: dict, obj_list: list[ComicFTS], *, create: bool, ) -> None: """Prepare ComicFTS object from sync query data.""" entry = { key.removeprefix("fts_"): comic.get(key, "") for key in _COMIC_KEYS if comic.get(key) } if sources := cls._get_sources_fts_field(entry): entry["sources"] = sources cls._create_comicfts_entry_fks(entry) entry["universes"] = entry["universes"].strip(",") cls._create_comicfts_entry_attributes(entry, create=create) entry["comic_id"] = comic["id"] comicfts: ComicFTS = ComicFTS(**entry) obj_list.append(comicfts) ================================================ FILE: codex/librarian/scribe/search/remove.py ================================================ """Search Index cleanup.""" from django.db.models import F, Max from codex.librarian.scribe.search.optimize import SearchIndexerOptimize from codex.librarian.scribe.search.status import ( SearchIndexCleanStatus, SearchIndexClearStatus, ) from codex.models.comic import Comic, ComicFTS class SearchIndexerRemove(SearchIndexerOptimize): """Search Index cleanup methods.""" def clear_search_index(self) -> None: """Clear the search index.""" clear_status = SearchIndexClearStatus() self.status_controller.start(clear_status) ComicFTS.objects.all().delete() self.status_controller.finish(clear_status) def _remove_stale_records(self, status): """Remove records not in the database from the index.""" self.status_controller.start(status) self.log.debug("Finding stale records to remove...") delete_comicfts = ComicFTS.objects.exclude( comic_id__in=Comic.objects.only("pk") ) status.total = delete_comicfts.count() self.status_controller.update(status) if status.total: self.log.debug(f"Removing {status.total} stale records...") count, _ = delete_comicfts.delete() status.complete = count return count def remove_stale_records(self, *, log_success: bool = True) -> int: """Remove records not in the database from the index, trapping exceptions.""" count = 0 status = SearchIndexCleanStatus(log_success=log_success) try: count = self._remove_stale_records(status) except Exception: self.log.exception("Removing stale records:") finally: self.status_controller.finish(status) return count def remove_duplicate_records(self) -> int: """Remove duplicate FTS records.""" self.log.debug("Looking for duplicate search entries...") duplicates = ComicFTS.objects.annotate(max_updated_at=Max("updated_at")).filter( updated_at__lt=F("max_updated_at") ) num_dupes = duplicates.count() self.log.debug(f"Found {num_dupes} duplicate search 
entries") count = num_dupes if num_dupes: count, _ = duplicates.delete() if count: self.log.info(f"Deleted {count} duplicate search entries") return count ================================================ FILE: codex/librarian/scribe/search/status.py ================================================ """Search Index Sync Statii.""" from abc import ABC from codex.librarian.scribe.status import ScribeStatus class SearchIndexStatus(ScribeStatus, ABC): """Search Index Sync Statii.""" class SearchIndexClearStatus(SearchIndexStatus): """Search Index Clear Status.""" CODE = "SIX" VERB = "Clear" _verbed = "Cleared" ITEM_NAME = "full text search table" SINGLE = True log_success = True class SearchIndexCleanStatus(SearchIndexStatus): """Search Index Clean Status.""" CODE = "SIR" VERB = "Clean" _verbed = "Cleaned" ITEM_NAME = "orphan search entries" class SearchIndexOptimizeStatus(SearchIndexStatus): """Search Index Optimize Status.""" CODE = "SIO" VERB = "Optimize" ITEM_NAME = "search virtual table" SINGLE = True log_success = True class SearchIndexSyncUpdateStatus(SearchIndexStatus): """Search Index Sync Update Status.""" CODE = "SSU" VERB = "Sync" _verbed = "Synced" ITEM_NAME = "old search entries" class SearchIndexSyncCreateStatus(SearchIndexStatus): """Search Index Sync Create Status.""" CODE = "SSC" VERB = "Sync" _verbed = "Synced" ITEM_NAME = "new search entries" SEARCH_INDEX_STATII = ( SearchIndexClearStatus, SearchIndexCleanStatus, SearchIndexOptimizeStatus, SearchIndexSyncUpdateStatus, SearchIndexSyncCreateStatus, ) ================================================ FILE: codex/librarian/scribe/search/sync.py ================================================ """Search Index update.""" from datetime import datetime from math import floor from time import time from types import MappingProxyType from typing import TYPE_CHECKING from zoneinfo import ZoneInfo from django.db.models import Q from django.db.models.aggregates import Max from django.db.models.expressions import F, Value from django.db.models.functions import Concat from django.db.models.query import QuerySet from humanize import intcomma, naturaldelta from codex.librarian.memory import get_mem_limit from codex.librarian.scribe.search.const import COMICFTS_UPDATE_FIELDS from codex.librarian.scribe.search.prepare import SearchEntryPrepare from codex.librarian.scribe.search.remove import SearchIndexerRemove from codex.librarian.scribe.search.status import ( SEARCH_INDEX_STATII, SearchIndexCleanStatus, SearchIndexClearStatus, SearchIndexSyncCreateStatus, SearchIndexSyncUpdateStatus, ) from codex.models import Comic from codex.models.comic import ComicFTS from codex.models.functions import GroupConcat from codex.settings import IMPORTER_SEARCH_SYNC_BATCH_MEMORY_RATIO if TYPE_CHECKING: from codex.librarian.status import Status _MIN_UTC_DATE = datetime.min.replace(tzinfo=ZoneInfo("UTC")) _ALL_FTS_COMIC_IDS_QUERY = Q(pk__in=ComicFTS.objects.values_list("comic_id", flat=True)) _SIMPLE_FTS_FIELDS = ( # Group Fks "publisher", "imprint", "series", # Fks "age_rating", "country", "original_format", "language", "scan_info", "tagger", ) _SIMPLE_FTS_ANNOTATIONS = MappingProxyType( {f"fts_{rel}": F(f"{rel}__name") for rel in _SIMPLE_FTS_FIELDS} ) _M2M_FTS_RELS = ( "characters", "credits__person", "genres", "locations", "series_groups", "identifiers__source", "stories", "story_arc_numbers__story_arc", "tags", "teams", ) _M2M_FTS_ANNOTATIONS = MappingProxyType( { f"fts_{rel}": GroupConcat( f"{rel}__name", order_by=f"{rel}__name", distinct=True, ) for rel in 
_M2M_FTS_RELS } ) class SearchIndexerSync(SearchIndexerRemove): """Search Index update methods.""" def _init_statuses(self, rebuild) -> None: """Initialize all statuses in order before starting.""" statii: list[Status] = [] if rebuild: statii.append( SearchIndexClearStatus(), ) else: statii.extend( [ SearchIndexCleanStatus(), SearchIndexSyncUpdateStatus(), ] ) statii.append(SearchIndexSyncCreateStatus()) self.status_controller.start_many(statii) def _update_search_index_clean(self, rebuild) -> int: """Clear or clean the search index and return the cleaned count.""" if rebuild: self.log.info("Rebuilding search index...") self.clear_search_index() return 0 return self.remove_stale_records(log_success=False) @staticmethod def _select_related_fts_query(qs): return qs.select_related( "publisher", "imprint", "series", "country", "language", "scan_info", "tagger", ) @staticmethod def _prefetch_related_fts_query(qs): # Prefetching deep relations breaks the 1000 sqlite query depth limit return qs.prefetch_related( "characters", "credits", # "credits__person", "identifiers", # "identifiers__source", "genres", "locations", "series_groups", "stories", "story_arc_numbers", # "story_arc_numbers__story_arc", "tags", "teams", "universes", ) @staticmethod def _annotate_fts_query(qs): return qs.annotate( **_SIMPLE_FTS_ANNOTATIONS, **_M2M_FTS_ANNOTATIONS, fts_universes=Concat( GroupConcat( "universes__designation", distinct=True, order_by="universes__designation", ), Value(","), GroupConcat( "universes__name", distinct=True, order_by="universes__name" ), ), ) def _update_search_index_operate_get_status( self, total_comics: int, chunk_human_size: str, *, create: bool ) -> SearchIndexSyncCreateStatus | SearchIndexSyncUpdateStatus: status_class = ( SearchIndexSyncCreateStatus if create else SearchIndexSyncUpdateStatus ) subtitle = f"Chunks of {chunk_human_size}" if total_comics else "" return status_class(total=total_comics, subtitle=subtitle) def _update_search_index_create_or_update( self, obj_list: list[ComicFTS], status, *, create: bool, ) -> None: if self.abort_event.is_set(): return verb = "create" if create else "update" verbing = (verb[:-1] + "ing").capitalize() num_comic_fts = len(obj_list) batch_position = f"({status.complete}/{status.total})" self.log.debug(f"{verbing} {num_comic_fts} {batch_position} search entries...") if create: ComicFTS.objects.bulk_create(obj_list) else: ComicFTS.objects.bulk_update(obj_list, COMICFTS_UPDATE_FIELDS) status.increment_complete(num_comic_fts) self.status_controller.update(status, notify=True) @staticmethod def _get_operation_comics_query(qs, *, create: bool): if create and not ComicFTS.objects.exists(): qs = Comic.objects.all() return qs def _update_search_index_operate( self, comics_filtered_qs: QuerySet, *, create: bool ): # Smaller systems may run out of virtual memory unless this is auto governed.
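# A sketch of the sizing arithmetic below, with hypothetical numbers (the real ratio comes from settings and is not shown in this file): a 4 GB memory limit with IMPORTER_SEARCH_SYNC_BATCH_MEMORY_RATIO == 2 would give floor((4 / 2) * 1000) == 2000 comics prepared per batch.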
mem_limit_gb = get_mem_limit("g") search_index_batch_size = floor( (mem_limit_gb / IMPORTER_SEARCH_SYNC_BATCH_MEMORY_RATIO) * 1000 ) chunk_human_size = intcomma(search_index_batch_size) verb = "create" if create else "update" self.log.debug(f"Counting total search index entries to {verb}...") total_comics = self._get_operation_comics_query( comics_filtered_qs, create=create ) total_comics = total_comics.count() status = self._update_search_index_operate_get_status( total_comics, chunk_human_size, create=create ) try: if not total_comics: self.log.debug(f"No search entries to {verb}.") return total_comics self.status_controller.start(status, notify=True) start = 0 while start < total_comics: obj_list = [] if self.abort_event.is_set(): break # Not using standard iterator chunking to control memory and really # do this in batches. self.log.debug( f"Preparing up to {chunk_human_size} comics for search indexing..." ) # This query should select only comics that still need processing, but it can over-select. # The start/total bound on this loop compensates for that. comics = self._get_operation_comics_query( comics_filtered_qs, create=create ) comics = self._prefetch_related_fts_query(comics) comics = comics.order_by("pk") comics = comics[:search_index_batch_size] comics = self._annotate_fts_query(comics) for comic in comics.values(): SearchEntryPrepare.prepare_sync_fts_entry( comic, obj_list, create=create ) if not obj_list: break self._update_search_index_create_or_update( obj_list, status, create=create, ) start += search_index_batch_size finally: self.status_controller.finish(status) return total_comics def _update_search_index_update(self): """Update out of date search entries.""" out_of_date_comics = Comic.objects.filter(_ALL_FTS_COMIC_IDS_QUERY) self.log.debug("Looking for search index watermark...") fts_watermark = ComicFTS.objects.aggregate(max=Max("updated_at"))["max"] if fts_watermark: since = fts_watermark out_of_date_comics = out_of_date_comics.filter(updated_at__gt=fts_watermark) else: since = "the fracturing of the multiverse" fts_watermark = fts_watermark or _MIN_UTC_DATE self.log.info(f"Looking for search entries to update since {since}...") count = out_of_date_comics.count() self.log.debug(f"Found {count} comics with out of date search entries.") return self._update_search_index_operate(out_of_date_comics, create=False) def _update_search_index_create(self): """Create missing search entries.""" self.log.info("Looking for missing search entries to create...") missing_comics = Comic.objects.all() count = None if ComicFTS.objects.exists() else missing_comics.count() missing_comics = missing_comics.exclude(_ALL_FTS_COMIC_IDS_QUERY) if count is None: count = missing_comics.count() self.log.debug(f"Found {count} comics missing from the search index.") return self._update_search_index_operate(missing_comics, create=True) def _update_search_index(self, *, rebuild: bool) -> None: """Update or Rebuild the search index.""" self.log.debug("In update search index before init statii.") start_time = time() self._init_statuses(rebuild) if self.abort_event.is_set(): return cleaned_count = self._update_search_index_clean(rebuild) if self.abort_event.is_set(): return updated_count = self._update_search_index_update() if self.abort_event.is_set(): return created_count = self._update_search_index_create() elapsed_time = time() - start_time elapsed = naturaldelta(elapsed_time) if rebuild: cleaned = "cleared entire search index" elif cleaned_count: cleaned = f"cleaned {cleaned_count} stale entries" else: cleaned = ""
updated = f"{updated_count} entries updated by sync" if updated_count else "" created = f"{created_count} entries created by sync" if created_count else "" summary_parts = filter(None, (cleaned, updated, created)) summary = ", ".join(summary_parts) if not summary: summary = "found to be already synced" self.log.success(f"Search index {summary} in {elapsed}.") def update_search_index(self, *, rebuild: bool) -> None: """Update or Rebuild the search index.""" self.abort_event.clear() try: self._update_search_index(rebuild=rebuild) except Exception: self.log.exception("Update search index") finally: if self.abort_event.is_set(): self.log.info("Search Index update aborted early.") self.abort_event.clear() self.status_controller.finish_many(SEARCH_INDEX_STATII) ================================================ FILE: codex/librarian/scribe/search/tasks.py ================================================ """Libarian Tasks for searchd.""" from dataclasses import dataclass from codex.librarian.scribe.tasks import ScribeTask class SearchIndexerTask(ScribeTask): """Tasks for the search indexer.""" @dataclass class SearchIndexSyncTask(SearchIndexerTask): """Update the search index.""" rebuild: bool = False class SearchIndexOptimizeTask(SearchIndexerTask): """Optimize search index.""" class SearchIndexCleanStaleTask(SearchIndexerTask): """Remove stale records.""" class SearchIndexClearTask(SearchIndexerTask): """Clear current search index.""" ================================================ FILE: codex/librarian/scribe/status.py ================================================ """Librarian Status for scribe bulk writes.""" from abc import ABC from codex.librarian.status import Status class ScribeStatus(Status, ABC): """Scribe Statii.""" class UpdateGroupTimestampsStatus(ScribeStatus): """Update Group Timestamps Status.""" CODE = "IGU" ITEM_NAME = "browser groups" VERB = "Update timestamps for" _verbed = "Updated timestamps for" SCRIBE_STATII = (UpdateGroupTimestampsStatus,) ================================================ FILE: codex/librarian/scribe/tasks.py ================================================ """DB Import Tasks.""" from dataclasses import dataclass from datetime import datetime from codex.librarian.tasks import LibrarianTask class ScribeTask(LibrarianTask): """Tasks for scribed.""" @dataclass class UpdateGroupsTask(ScribeTask): """Force the update of group timestamp.""" start_time: datetime | None = None @dataclass class LazyImportComicsTask(ScribeTask): """Lazy import of metadaa for existing comics.""" group: str pks: frozenset[int] class ImportAbortTask(ScribeTask): """Abort Import.""" class SearchIndexSyncAbortTask(ScribeTask): """Abort current search index sync.""" class CleanupAbortTask(ScribeTask): """Abort running cleanup/janitor tasks.""" ================================================ FILE: codex/librarian/scribe/timestamp_update.py ================================================ """Update Groups timestamp for cover cache busting.""" from collections.abc import Mapping from datetime import datetime from django.db.models import QuerySet from django.db.models.aggregates import Count from django.db.models.functions.datetime import Now from django.db.models.query_utils import Q from django.utils import timezone from codex.librarian.notifier.tasks import LIBRARY_CHANGED_TASK from codex.librarian.scribe.status import UpdateGroupTimestampsStatus from codex.librarian.worker import WorkerStatusBase from codex.models import StoryArc, Volume from codex.models.groups import 
BrowserGroupModel from codex.models.library import Library from codex.views.const import GROUP_MODELS _UPDATE_FIELDS = ("updated_at",) class TimestampUpdater(WorkerStatusBase): """Update Groups timestamp for cover cache busting.""" @staticmethod def _get_update_filter( model: type[BrowserGroupModel], start_time: datetime, force_update_group_map: Mapping, library: Library, ) -> Q: # Get groups with comics updated during this import rel = "storyarcnumber__" if model == StoryArc else "" updated_at_rel = rel + "comic__updated_at__gt" library_rel = rel + "comic__library" updated_filter = {library_rel: library, updated_at_rel: start_time} update_filter = Q(**updated_filter) # Get groups with custom covers updated during this import if model != Volume: update_filter |= Q(custom_cover__updated_at__gt=start_time) # Get groups to be force updated (usually those with deleted children) if pks := force_update_group_map.get(model): update_filter |= Q(pk__in=pks) return update_filter @staticmethod def _add_child_count_filter(qs: QuerySet, model: type[BrowserGroupModel]): """Filter out groups with no comics.""" rel_prefix = "storyarcnumber__" if model == StoryArc else "" rel_prefix += "comic" qs = qs.alias(child_count=Count(f"{rel_prefix}__pk", distinct=True)) return qs.filter(child_count__gt=0) @classmethod def _update_group_model( cls, force_update_group_map: Mapping, model: type[BrowserGroupModel], start_time: datetime, library: Library, log_list, ) -> int: """Update a single group model.""" update_filter = cls._get_update_filter( model, start_time, force_update_group_map, library ) qs = model.objects.filter(update_filter) qs = cls._add_child_count_filter(qs, model) qs = qs.distinct() qs = qs.only(*_UPDATE_FIELDS) updated = [] for obj in qs: obj.updated_at = Now() updated.append(obj) count = len(updated) if count: model.objects.bulk_update(updated, _UPDATE_FIELDS) log_list.append(f"{count} {model.__name__}s") return count def update_library_groups( self, library: Library, start_time: datetime, force_update_group_map: Mapping, *, mark_library_in_progress=False, ) -> int: """Update timestamps for each group for cover cache busting.""" total_count = 0 if mark_library_in_progress: library.start_update() status = UpdateGroupTimestampsStatus() self.status_controller.start(status) try: log_list = [] for model in GROUP_MODELS: count = self._update_group_model( force_update_group_map, model, start_time, library, log_list ) if not count: continue self.log.debug(f"Updated {count} {model.__name__}s timestamps.") status.increment_complete(count) self.status_controller.update(status) total_count += count finally: if mark_library_in_progress: library.end_update() self.status_controller.finish(status) return total_count def update_groups(self, task) -> None: """Update groups in all libraries.""" count = 0 start_time = task.start_time or timezone.now() libraries = Library.objects.filter(covers_only=False).only("pk") for library in libraries: count += self.update_library_groups( library, start_time, {}, mark_library_in_progress=True ) level = "INFO" if count else "DEBUG" self.log.log(level, f"Updated timestamps for {count} groups.") self.librarian_queue.put(LIBRARY_CHANGED_TASK) ================================================ FILE: codex/librarian/status.py ================================================ """Librarian Status dataclass.""" from abc import ABC from dataclasses import dataclass from time import time from typing import ClassVar from humanize import intword, naturaldelta @dataclass class Status(ABC): 
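# Subclasses configure display strings via class attributes, which title() and verbed() below derive from. A hypothetical illustration (not a real codex status): class ExampleStatus(Status): CODE = "EXS"; VERB = "Example"; ITEM_NAME = "things". Then ExampleStatus.title() returns "Example Things" and ExampleStatus.verbed() returns "Exampled".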
"""Keep track of librarians status in memory.""" CODE: ClassVar[str] VERB: ClassVar[str] ITEM_NAME: ClassVar[str] SINGLE: ClassVar[bool] = False _title: ClassVar[str] = "" _verbed: ClassVar[str] = "" complete: int | None = None total: int | None = None since_updated: float = 0.0 subtitle: str = "" start_time: float | None = None log_success: bool = False @classmethod def title(cls) -> str: """Return created title.""" if not cls._title: title_parts = (cls.VERB, *cls.ITEM_NAME.split(" ")) title_parts = (part.capitalize() for part in title_parts) cls._title = " ".join(title_parts) return cls._title @classmethod def verbed(cls) -> str: """Return verbed, create it if it doesn't exist.""" if not cls._verbed: cls._verbed = cls.VERB + "d" return cls._verbed def increment_complete(self, count: int = 1) -> None: """Add count to complete.""" self.complete = self.complete + count if self.complete else count def decrement_total(self) -> None: """Decrement total if not not.""" self.total = max(self.total - 1, 0) if self.total is not None else None def start(self) -> None: """Set start time.""" self.start_time = time() def _elapsed(self): return time() - self.start_time if self.start_time else 0 def elapsed(self) -> str: """Elapsed time.""" return naturaldelta(self._elapsed()) def per_second(self) -> str: """Items per second.""" if self.SINGLE or self.total is None: return "" elapsed = self._elapsed() ips = intword(self.total / elapsed) if elapsed else "infinite" return f"{ips} {self.ITEM_NAME} per second" def reset(self) -> None: """Reset for batch statii.""" self.complete = 0 self.total = 0 self.start() ================================================ FILE: codex/librarian/status_controller.py ================================================ """Librarian Status.""" from collections.abc import Iterable from inspect import isclass from multiprocessing import Queue from time import time from types import MappingProxyType from typing import Any from django.db.models.functions.datetime import Now from django.db.models.query import Q from django.utils.timezone import datetime, now, timedelta from loguru._logger import Logger from codex.librarian.notifier.tasks import LIBRARIAN_STATUS_TASK from codex.librarian.status import Status from codex.models.admin import LibrarianStatus def get_default(field): """Get the default value for the model field.""" return LibrarianStatus._meta.get_field(field).get_default() DEFAULT_FIELDS = ("preactive", "complete", "total", "active", "subtitle") STATUS_DEFAULTS = {field: get_default(field) for field in DEFAULT_FIELDS} class StatusController: """Run operations on the LibrarianStatus table.""" _UPDATE_DELTA = 5 def __init__(self, logger_: Logger, librarian_queue: Queue) -> None: """Iinitialize logger and librarian queue.""" self.log = logger_ self.librarian_queue = librarian_queue def _enqueue_notifier_task(self, *, notify: bool = True) -> None: """Notify the status has changed.""" if not notify: return self.librarian_queue.put(LIBRARIAN_STATUS_TASK) def _loggit(self, level: str, status: Status) -> None: """Log with a ? in place of none.""" msg = f"{status.title()} {status.subtitle}".strip() msg += ": " if status.complete is None and status.total is None: msg += "In progress" else: count = "?" if status.complete is None else status.complete total = "?" 
if status.total is None else status.total msg += f"{count}/{total}" self.log.log(level, msg) def _update( self, status: Status, *, notify: bool, active: datetime | None = None, preactive: datetime | None = None, ) -> None: """Update a librarian status row.""" try: updates: dict[str, Any] = { "complete": status.complete, "total": status.total, "updated_at": Now(), } if preactive is not None: updates["preactive"] = preactive if active: updates["active"] = active if status.subtitle: updates["subtitle"] = status.subtitle LibrarianStatus.objects.filter(status_type=status.CODE).update(**updates) self._enqueue_notifier_task(notify=notify) self._loggit("DEBUG", status) status.since_updated = time() except Exception: self.log.exception(f"Update status: {status.title()}") def start( self, status: Status, *, notify: bool = True, preactive: datetime | None = None, ) -> None: """Start a librarian status.""" status.start() self._update(status, notify=notify, preactive=preactive, active=now()) def start_many(self, statii: Iterable[Status | type[Status]]) -> None: """Start many librarian statuses.""" for index, status_or_class in enumerate(statii): status = status_or_class() if isclass(status_or_class) else status_or_class status.start() pad_ms = index * 100 # for order preactive = now() + timedelta(milliseconds=pad_ms) self._update(status, notify=False, preactive=preactive) self._enqueue_notifier_task(notify=True) def update(self, status: Status, *, notify: bool = True) -> None: """Update a librarian status.""" if time() - status.since_updated < self._UPDATE_DELTA: # noop unless time has expired. return self._update(status, notify=notify) def _log_finish(self, status: Status) -> None: """Log finish of status with stats.""" level = "INFO" suffix = "" if elapsed := status.elapsed(): suffix += f" in {elapsed}" if status.SINGLE: count = "" elif count := status.complete: count = str(count) if persecond := status.per_second(): suffix += f" at a rate of {persecond}" else: count = "no" level = "DEBUG" if status.log_success: level = "SUCCESS" prefix_parts = filter( None, (status.verbed(), count, status.ITEM_NAME, status.subtitle) ) prefix = " ".join(prefix_parts) self.log.log(level, f"{prefix}{suffix}.") def _finish_many_log(self, updated_statii, *, is_positive_statii: bool): """Log when done.""" if is_positive_statii: for status in updated_statii: if isinstance(status, Status): self._log_finish(status) else: self.log.info("Cleared all librarian statuses") def finish_many( self, statii: Iterable[Status | type[Status] | None], *, notify: bool = True, ) -> None: """Finish all librarian statuses.""" positive_statii: MappingProxyType[str, Status | type[Status]] = ( MappingProxyType({status.CODE: status for status in statii if status}) ) try: # Construct update query if statii and not positive_statii: # If statii has elements but they were all None, this is a noop. # But if statii was empty, this is a finish-all command, which fires an extra # LIBRARIAN_STATUS notification even when nothing was active; that may be unnecessary. return updates = {**STATUS_DEFAULTS, "updated_at": Now()} if positive_statii: # Finish specific statuses unconditionally by status_type. # Don't require active/preactive to be set; subtasks may # have already been individually finished. ls_filter = Q(status_type__in=positive_statii.keys()) else: # Clear-all: only touch rows that are currently active.
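# A row with either timestamp set counts as in-flight; the update below resets active, preactive, and the other STATUS_DEFAULTS fields in a single query.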
ls_filter = Q(active__isnull=False) | Q(preactive__isnull=False) # Perform updates update_statii = LibrarianStatus.objects.filter(ls_filter) # Save statii for individual reporting. updated_statii = update_statii.values() update_statii.update(**updates) self._finish_many_log( updated_statii, is_positive_statii=bool(positive_statii) ) except Exception: self.log.exception(f"Finish status {positive_statii}") finally: self._enqueue_notifier_task(notify=notify) def finish(self, status: Status | None, *, notify: bool = True) -> None: """Finish a librarian status.""" try: self.finish_many((status,), notify=notify) except Exception as exc: self.log.warning(exc) ================================================ FILE: codex/librarian/tasks.py ================================================ """Librarian Tasks.""" from abc import ABC class LibrarianTask(ABC): # noqa: B024 """Generic Librarian Task.""" class LibrarianShutdownTask(LibrarianTask): """Signal task.""" class WakeCronTask(LibrarianTask): """Signal task.""" ================================================ FILE: codex/librarian/telemeter/__init__.py ================================================ """Telemeter.""" ================================================ FILE: codex/librarian/telemeter/scheduled_time.py ================================================ """Get telemeter send time.""" from datetime import UTC, datetime, timedelta from uuid import UUID from loguru._logger import Logger from codex.choices.admin import AdminFlagChoices from codex.librarian.telemeter.telemeter import get_telemeter_timestamp from codex.models.admin import AdminFlag # Timing _ONE_DAY = 24 * 60 * 60 _SECS_PER_WEEK = 7 * _ONE_DAY _MAX_UUID = 2**128 _UUID_DIVISOR = _MAX_UUID / _SECS_PER_WEEK def _get_utc_start_of_week(): """Get timestamp for this Monday 00:00:00.""" # Monday, now o'clock. utc_now = datetime.now(tz=UTC) start_of_week = utc_now - timedelta(days=utc_now.weekday()) # Monday, midnight. return start_of_week.replace(hour=0, minute=0, second=0, microsecond=0).astimezone( tz=UTC ) def _is_created_recently(ts) -> bool: """Don't send if created recently.""" now = datetime.now(tz=UTC) since = ts.created_at - now return abs(since.total_seconds()) < _ONE_DAY def _get_scheduled_time(ts) -> int | datetime: """Compute the time of week to send from the uuid.""" start_of_week = _get_utc_start_of_week() uuid = UUID(ts.version) uuid_int: int = int(uuid.int) seconds_after_week_start = uuid_int / _UUID_DIVISOR time_of_week = timedelta(seconds=seconds_after_week_start) telemeter_time = start_of_week + time_of_week telemeter_time = telemeter_time.astimezone(tz=UTC) if telemeter_time < ts.updated_at: # Already ran this week, or created this week after run time. telemeter_time = 0 return telemeter_time def get_telemeter_time(log: Logger) -> int | datetime: """Get the time to send telemetry.""" # Should we schedule telemeter at all? if ( not AdminFlag.objects.only("on") .get(key=AdminFlagChoices.SEND_TELEMETRY.value) .on ): log.trace("Telemeter disabled. Not scheduled.") return 0 ts = get_telemeter_timestamp() if _is_created_recently(ts): log.trace("Telemeter created recently.
Not scheduled.")
        return 0
    return _get_scheduled_time(ts)


================================================
FILE: codex/librarian/telemeter/stats.py
================================================
"""Collect Codex stats."""

from multiprocessing import cpu_count
from pathlib import Path
from platform import machine, python_version, release, system
from types import MappingProxyType

from caseconverter import snakecase
from django.contrib.sessions.models import Session
from django.db.models import Count

from codex.models import (
    Comic,
    Library,
)
from codex.models.settings import SettingsBrowser, SettingsReader
from codex.version import VERSION
from codex.views.const import CONFIG_MODELS, METADATA_MODELS, STATS_GROUP_MODELS

_KEY_MODELS_MAP = MappingProxyType(
    {
        "config": CONFIG_MODELS,
        "groups": STATS_GROUP_MODELS,
        "metadata": METADATA_MODELS,
    }
)
_DOCKERENV_PATH = Path("/.dockerenv")
_CGROUP_PATH = Path("/proc/self/cgroup")
_USER_STATS = MappingProxyType(
    {
        "browser": {
            "model": SettingsBrowser,
            "keys": ("top_group", "order_by", "dynamic_covers"),
        },
        "reader": {
            "model": SettingsReader,
            "keys": (
                "finish_on_last_page",
                "fit_to",
                "reading_direction",
            ),
        },
    }
)


class CodexStats:
    """Collect codex stats."""

    def __init__(self, params=None) -> None:
        """Specify which stats to collect. Default to all."""
        if not params:
            params = {}
        self.params = params

    @classmethod
    def _is_docker(cls) -> bool:
        """Test if we're in a docker container."""
        try:
            return _DOCKERENV_PATH.is_file() or "docker" in _CGROUP_PATH.read_text()
        except Exception:
            return False

    def _get_models(self, key) -> tuple:
        """Get models from request params."""
        request_model_set = self.params.get(key, {})
        all_models = _KEY_MODELS_MAP[key]
        if request_model_set:
            models = [
                model
                for model in all_models
                for model_name in request_model_set
                if model.__name__.lower() == model_name.lower()
            ]
        else:
            models = all_models
        return tuple(models)

    def _get_model_counts(self, key) -> dict:
        """Get database counts of each model group."""
        models = self._get_models(key)
        obj = {}
        for model in models:
            name = snakecase(model.__name__) + "_count"
            qs = model.objects
            if model == Library:
                qs = qs.filter(covers_only=False)
            obj[name] = qs.count()
        return obj

    @staticmethod
    def _aggregate_settings_instance(instance, subkeys, user_stats) -> None:
        for key in subkeys:
            value = getattr(instance, key, None)
            if value is None:
                continue
            if key not in user_stats:
                user_stats[key] = {}
            if value not in user_stats[key]:
                user_stats[key][value] = 0
            user_stats[key][value] += 1

    @classmethod
    def _get_session_stats(cls) -> tuple[dict, int]:
        """Return aggregated user settings and the number of anonymous sessions."""
        sessions = Session.objects.all()
        anon_session_count = 0
        for encoded_session in sessions:
            session = encoded_session.get_decoded()
            if not session.get("_auth_user_id"):
                anon_session_count += 1
        user_stats = {}
        for info in _USER_STATS.values():
            model = info["model"]
            subkeys = info["keys"]
            for instance in model.objects.all():  # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute]
                cls._aggregate_settings_instance(instance, subkeys, user_stats)
        return user_stats, anon_session_count

    def _add_platform(self, obj) -> None:
        """Add dict of platform information to object."""
        if self.params and "platform" not in self.params:
            return
        platform = {
            "docker": self._is_docker(),
            "machine": machine(),
            "cores": cpu_count(),
            "system": {
                "name": system(),
                "release": release(),
            },
            "python_version": python_version(),
            "codex_version": VERSION,
        }
        obj["platform"] = platform

    def _add_config(self, obj) -> None:
        """Add dict of config information to object."""
        if self.params and "config" not in self.params:
            return
        config = self._get_model_counts("config")
        sessions, config["user_anonymous_count"] = self._get_session_stats()
        config["user_registered_count"] = config.pop("users_count", 0)
        config["auth_group_count"] = config.pop("groups_count", 0)
        obj["config"] = config
        obj["sessions"] = sessions

    def _add_groups(self, obj) -> None:
        """Add dict of groups information to object."""
        if self.params and "groups" not in self.params:
            return
        groups = self._get_model_counts("groups")
        groups["issue_count"] = groups.pop("comic_count", 0)
        obj["groups"] = groups

    def _add_file_types(self, obj) -> None:
        """Query for file types."""
        if self.params and "file_types" not in self.params:
            return
        file_types = {}
        qs = (
            Comic.objects.values("file_type")
            .annotate(count=Count("file_type"))
            .order_by()
        )
        for query_group in qs:
            value = query_group["file_type"]
            name = value.lower() if value else "unknown"
            file_types[name] = query_group["count"]
        sorted_fts = dict(sorted(file_types.items()))
        obj["file_types"] = sorted_fts

    def _add_metadata(self, obj) -> None:
        """Add dict of metadata counts to object."""
        if self.params and "metadata" not in self.params:
            return
        metadata = self._get_model_counts("metadata")
        obj["metadata"] = metadata

    def get(self) -> dict:
        """Construct the stats object."""
        obj = {}
        self._add_platform(obj)
        self._add_config(obj)
        self._add_groups(obj)
        self._add_file_types(obj)
        self._add_metadata(obj)
        return obj


================================================
FILE: codex/librarian/telemeter/tasks.py
================================================
"""Telemeter tasks."""

from dataclasses import dataclass

from codex.librarian.bookmark.tasks import BookmarkTask


@dataclass
class TelemeterTask(BookmarkTask):
    """Send telemetry."""
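A usage sketch for CodexStats (a hypothetical call site, not repository code; assumes a configured Django environment). With no params every section is collected; params whitelists top-level sections, and for the model-count sections its values name individual models:

    # Hypothetical usage sketch; not part of the repository.
    from codex.librarian.telemeter.stats import CodexStats

    all_stats = CodexStats().get()  # platform, config, groups, file_types, metadata

    # Only platform info plus counts for two of the group models.
    some_stats = CodexStats({"platform": True, "groups": {"publisher", "series"}}).get()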
================================================
FILE: codex/librarian/telemeter/telemeter.py
================================================
"""Telemeter job."""

import json
from base64 import a85decode
from lzma import compress
from urllib.request import Request, urlopen
from uuid import uuid4

from codex.choices.admin import AdminFlagChoices
from codex.librarian.telemeter.stats import CodexStats
from codex.models.admin import AdminFlag, Timestamp

# Version
_APP_NAME = "codex"
_VERSION = "1"

# Sending
# this isn't meant to fool you. it's meant to discourage lazy scraper bots.
_BASE = "".join(
    (
        a85decode(b"BQS?8F#ks-@:XCm@;\\+").decode(),
        a85decode(b"Ea`frF)to6Bk]hRFCB94/c").decode(),
        a85decode(b"@rGmhGV*rI@:Wqi/n&^<").decode(),
    )
)
_HEADERS = {"Content-Type": "application/xz"}
_POST = _BASE + f"/stats/{_APP_NAME}/{_VERSION}"
_TIMEOUT = 5


def get_telemeter_timestamp():
    """Get or create timestamp."""
    key = Timestamp.Choices.TELEMETER_SENT.value
    defaults = {"key": key}
    ts, _ = Timestamp.objects.get_or_create(defaults=defaults, key=key)
    if not ts.version:
        ts.version = str(uuid4())
        ts.save()
    return ts


def _post_stats(data) -> None:
    """Post telemetry to endpoint."""
    data_json = json.dumps(data)
    json_bytes = data_json.encode()
    compressed_data = compress(json_bytes)
    request = Request(_POST, data=compressed_data, headers=_HEADERS, method="POST")  # noqa: S310
    response = urlopen(request, timeout=_TIMEOUT)  # noqa: S310
    # urlopen() raises HTTPError for non-2xx statuses, so no explicit status
    # check is needed here. (HTTPResponse has no raise_for_status(); that is
    # a requests API.)
    response.close()


def _send_telemetry(uuid) -> None:
    """Send telemetry to server."""
    if (
        not AdminFlag.objects.only("on")
        .get(key=AdminFlagChoices.SEND_TELEMETRY.value)
        .on
    ):
        reason = "Send Telemetry flag is off."
        raise ValueError(reason)
    stats = CodexStats().get()
    data = {"stats": stats, "uuid": uuid}
    _post_stats(data)


def send_telemetry(log) -> None:
    """Send anonymous telemetry during one window per week."""
    try:
        ts = get_telemeter_timestamp()
        try:
            _send_telemetry(ts.version)
        except Exception as exc:
            log.debug(f"Failed to send anonymous stats: {exc}")
        # Update updated_at even on failure, to prevent rapid rescheduling.
        ts.save()
    except Exception as exc:
        log.debug(f"Failed to get or set telemeter timestamp: {exc}")
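The wire format _post_stats produces is simply XZ-compressed JSON. A self-contained round-trip sketch using only the standard library (the payload values are made up):

    import json
    from lzma import compress, decompress

    payload = {"stats": {"platform": {"docker": False}}, "uuid": "example-uuid"}
    body = compress(json.dumps(payload).encode())    # what the client POSTs
    decoded = json.loads(decompress(body).decode())  # what the server recovers
    assert decoded == payload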
================================================
FILE: codex/librarian/threads.py
================================================
"""Abstract Thread worker for doing queued tasks."""

import time
from abc import ABC, abstractmethod
from multiprocessing.queues import Queue
from queue import Empty, SimpleQueue
from threading import Thread
from typing import override

from django.db import close_old_connections
from loguru._logger import Logger
from setproctitle import setproctitle

from codex.librarian.worker import WorkerStatusMixin


class BreakLoopError(Exception):
    """Simple way to break out of a nested loop in a function."""


class NamedThread(Thread, WorkerStatusMixin, ABC):
    """A thread that sets its name for ps."""

    SHUTDOWN_MSG: str | tuple = "shutdown"
    SHUTDOWN_TIMEOUT = 5

    def __init__(
        self,
        logger_: Logger,
        librarian_queue: Queue,
        db_write_lock,
        name="",
        **kwargs,
    ) -> None:
        """Initialize queues."""
        self.init_worker(logger_, librarian_queue, db_write_lock)
        if not name:
            name = self.__class__.__name__
        super().__init__(name=name, **kwargs)

    def run_start(self) -> None:
        """First thing to do when running a new thread."""
        self.log.debug(f"Started {self.name}")
        setproctitle(self.name)

    @override
    def join(self, timeout=None) -> None:
        """End the thread."""
        self.log.debug(f"Waiting for {self.__class__.__name__} to join.")
        # The timeout argument is ignored; always bound the join with
        # SHUTDOWN_TIMEOUT so shutdown cannot hang.
        super().join(self.SHUTDOWN_TIMEOUT)
        self.log.debug(f"{self.__class__.__name__} joined.")

    def stop(self):
        """Noop."""
        self.log.debug(f"Stop Requested {self.__class__.__name__}")


class QueuedThread(NamedThread, ABC):
    """Abstract Thread worker for doing queued tasks."""

    def __init__(self, *args, **kwargs) -> None:
        """Initialize with overridden name and as a daemon thread."""
        self.queue = kwargs.pop("queue", SimpleQueue())
        super().__init__(*args, daemon=True, **kwargs)

    @abstractmethod
    def process_item(self, item):
        """Process one item from the queue."""
        raise NotImplementedError

    def get_timeout(self) -> float | None:
        """Set no timeout by default."""
        return None

    def timed_out(self):
        """Override to do things on queue timeout."""

    def _check_item(self) -> None:
        """Get items, with timeout.

        Check for shutdown and Empty.
        """
        timeout = self.get_timeout()
        try:
            item = self.queue.get(timeout=timeout)
            if item == self.SHUTDOWN_MSG:
                raise BreakLoopError
            self.process_item(item)
        except Empty:
            self.timed_out()

    @override
    def run(self) -> None:
        """Run thread loop."""
        self.run_start()
        while True:
            try:
                close_old_connections()
                self._check_item()
            except BreakLoopError:
                break
            except Exception:
                self.log.exception(f"{self.__class__.__name__} crashed:")
        self.log.debug(f"Stopped {self.__class__.__name__}")

    @override
    def stop(self) -> None:
        """Stop the thread."""
        super().stop()
        self.queue.put(self.SHUTDOWN_MSG)


class AggregateMessageQueuedThread(QueuedThread, ABC):
    """Abstract Thread worker for buffering and aggregating messages."""

    FLOOD_DELAY = 1.0
    MAX_DELAY = 5.0

    def __init__(self, *args, **kwargs) -> None:
        """Initialize the cache."""
        self.cache = {}
        self._last_send = time.time()
        super().__init__(*args, **kwargs)

    def set_last_send(self) -> None:
        """Set the last send time to now."""
        self._last_send = time.time()

    @override
    def get_timeout(self):
        """Aggregate queue has a conditional timeout."""
        return self.FLOOD_DELAY if self.cache else None

    @abstractmethod
    def aggregate_items(self, item):
        """Abstract method for aggregating items."""
        raise NotImplementedError

    @abstractmethod
    def send_all_items(self):
        """Abstract method for sending all items."""
        raise NotImplementedError

    def cleanup_cache(self, keys) -> None:
        """Remove sent messages from the cache and record send times."""
        for key in keys:
            self.cache.pop(key, None)
        self.set_last_send()

    @override
    def process_item(self, item) -> None:
        """Aggregate items and flush if we have waited too long."""
        self.aggregate_items(item)
        since_last_timed_out = time.time() - self._last_send
        waited_too_long = since_last_timed_out > self.MAX_DELAY
        if waited_too_long:
            self.timed_out()

    @override
    def timed_out(self) -> None:
        """Send the items and set when we did this."""
        self.send_all_items()
        self.set_last_send()
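A minimal sketch of the QueuedThread contract above, assuming the logger_/librarian_queue/db_write_lock plumbing used elsewhere in the librarian; EchoThread is hypothetical, not a repository class. Only process_item() is required; queueing, logging, crash containment, and shutdown come from the base class:

    # Hypothetical example; not part of the repository.
    from codex.librarian.threads import QueuedThread

    class EchoThread(QueuedThread):
        """Hypothetical worker: log every queued item."""

        def process_item(self, item) -> None:
            # run() calls this once per item and catches exceptions,
            # so one failing item does not kill the worker.
            self.log.info(f"echo: {item}")

    # thread = EchoThread(logger_, librarian_queue, db_write_lock)
    # thread.start()
    # thread.queue.put("hello")
    # thread.stop()   # enqueues SHUTDOWN_MSG
    # thread.join()   # waits up to SHUTDOWN_TIMEOUT seconds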
================================================
FILE: codex/librarian/worker.py
================================================
"""Mixin for common librarian thread attributes."""

from multiprocessing.queues import Queue
from typing import override

from loguru._logger import Logger

from codex.librarian.status_controller import StatusController


class WorkerMixin:
    """Mixin for common thread attributes."""

    def init_worker(
        self, /, logger_: Logger, librarian_queue: Queue, db_write_lock
    ) -> None:
        """Initialize queues."""
        if not all((logger_, librarian_queue, db_write_lock)):
            reason = f"{logger_=}, {librarian_queue=}, and {db_write_lock=} must be passed in."
            raise ValueError(reason)
        self.log = logger_  # pyright: ignore[reportUninitializedInstanceVariable]
        self.librarian_queue = librarian_queue  # pyright: ignore[reportUninitializedInstanceVariable]
        self.db_write_lock = db_write_lock  # pyright: ignore[reportUninitializedInstanceVariable]


class WorkerStatusMixin(WorkerMixin):
    """Worker mixin also sets up status controller."""

    @override
    def init_worker(self, /, logger_, librarian_queue: Queue, db_write_lock) -> None:
        super().init_worker(logger_, librarian_queue, db_write_lock)
        self.status_controller = StatusController(  # pyright: ignore[reportUninitializedInstanceVariable]
            logger_, librarian_queue
        )


class WorkerBase(WorkerMixin):
    """Base for Worker."""

    def __init__(self, logger_, librarian_queue: Queue, db_write_lock) -> None:
        """Initialize Worker."""
        super().__init__()
        self.init_worker(logger_, librarian_queue, db_write_lock)


class WorkerStatusBase(WorkerStatusMixin):
    """Base for Status Worker."""

    def __init__(self, logger_, librarian_queue: Queue, db_write_lock) -> None:
        """Initialize Worker."""
        super().__init__()
        self.init_worker(logger_, librarian_queue, db_write_lock)


class WorkerStatusAbortableBase(WorkerStatusBase):
    """Base for Abortable Status Worker."""

    def __init__(self, logger_, librarian_queue: Queue, db_write_lock, event) -> None:
        """Initialize Abortable Worker."""
        super().__init__(logger_, librarian_queue, db_write_lock)
        self.abort_event = event


================================================
FILE: codex/middleware.py
================================================
"""Django middleware for codex."""

from base64 import b64decode
from time import time
from typing import Any

from django.db import connection
from django.utils import timezone
from loguru import logger

from codex.settings import (
    DEBUG_LOG_AUTH_HEADERS,
    DEBUG_LOG_RESPONSE_TIME,
    DEBUG_SLOW_QUERY_LIMIT,
)
from codex.version import PACKAGE_NAME, VERSION


class CodexMiddleware:
    """Set Codex Headers."""

    def __init__(self, get_response):
        """Initialize response method."""
        self.get_response = get_response

    def __call__(self, request):
        """Set headers and timezones."""
        # Fix timezone from the django session.
        # https://docs.djangoproject.com/en/dev/topics/i18n/timezones/
        if tzname := request.session.get("django_timezone"):
            timezone.activate(tzname)
        else:
            timezone.deactivate()

        # Set server header
        response = self.get_response(request)
        response["Server"] = f"{PACKAGE_NAME}/{VERSION}"
        return response


class LogResponseTimeMiddleware:
    """Slow query Middleware."""

    def __init__(self, get_response) -> None:
        """Set up get_response func."""
        self.get_response = get_response

    def _log_response_time(self, request):
        """Log response times if slow or debug."""
        start_time = time()
        response = self.get_response(request)
        response_time = time() - start_time
        is_slow = response_time > DEBUG_SLOW_QUERY_LIMIT
        if is_slow or DEBUG_LOG_RESPONSE_TIME:
            msg = f"{response_time}s {request.build_absolute_uri()}"
            if is_slow:
                logger.warning(msg)
            else:
                logger.trace(msg)
        return response

    def _log_query_times(self) -> None:
        """Log queries if slow or debug."""
        for query in connection.queries:
            is_slow = float(query["time"]) > DEBUG_SLOW_QUERY_LIMIT
            if DEBUG_LOG_RESPONSE_TIME or is_slow:
                msg = f"{query['time']}s {query['sql']}"
                if is_slow:
                    logger.warning(msg)
                else:
                    logger.trace(msg)

    def __call__(self, request) -> Any:
        """Call request."""
        response = self._log_response_time(request)
        self._log_query_times()
        return response


class LogRequestMiddleware:
    """Log every request."""

    def __init__(self, get_response) -> None:
        """Store the get_response callable."""
        self.get_response = get_response

    def _log_auth_headers(self, request) -> None:
        if not DEBUG_LOG_AUTH_HEADERS:
            return
        filtered_headers = {}
        for key, value in request.headers.items():
            if key.lower() in {"user-agent", "authorization", "cookie"}:
                if key.lower().startswith("auth"):
                    parts = value.split(" ")
                    if parts[0] == "Basic":
                        parts[1] = b64decode(parts[1]).decode()
                    final_val = " ".join(parts)
                else:
                    final_val = value
            else:
                final_val = value
            filtered_headers[key] = final_val
        logger.trace(filtered_headers)

    def __call__(self, request) -> Any:
        """Trace the request uri."""
        uri = request.build_absolute_uri()  # Includes query parameters
        logger.trace(uri)
        self._log_auth_headers(request)
        if data := getattr(request, "data", None):
            logger.trace(data)
        return self.get_response(request)
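These middleware classes are activated through Django's MIDDLEWARE setting. A hedged ordering sketch (the repository's actual settings module may differ): SessionMiddleware must precede CodexMiddleware because CodexMiddleware reads request.session:

    # Hypothetical ordering sketch; codex's real settings may differ.
    MIDDLEWARE = [
        "django.contrib.sessions.middleware.SessionMiddleware",
        "codex.middleware.CodexMiddleware",
        "codex.middleware.LogResponseTimeMiddleware",
        "codex.middleware.LogRequestMiddleware",
    ]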
================================================
FILE: codex/migrations/0001_init.py
================================================
"""Generated by Django 3.1 on 2020-08-21 18:59."""

import datetime
from decimal import Decimal

import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models

import codex.models


class Migration(migrations.Migration):
    """Initial schema."""

    initial = True

    dependencies = [
        ("sessions", "0001_initial"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name="Character",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("name", models.CharField(db_index=True, max_length=32)),
            ],
            options={
                "abstract": False,
                "unique_together": {("name",)},
            },
        ),
        migrations.CreateModel(
            name="Imprint",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("is_default", models.BooleanField(default=False)),
                ("sort_name", models.CharField(max_length=32)),
                ("name", models.CharField(default="Main Imprint", max_length=32)),
            ],
        ),
        migrations.CreateModel(
            name="Library",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "path",
                    models.CharField(
                        db_index=True,
                        max_length=128,
                        unique=True,
                        validators=[codex.models.validate_dir_exists],
                    ),
                ),
                ("enable_watch", models.BooleanField(db_index=True, default=True)),
                ("enable_scan_cron", models.BooleanField(db_index=True, default=True)),
                (
                    "scan_frequency",
                    models.DurationField(default=datetime.timedelta(seconds=43200)),
                ),
                ("last_scan", models.DateTimeField(null=True)),
                ("scan_in_progress", models.BooleanField(default=False)),
                ("schema_version", models.PositiveSmallIntegerField(default=0)),
            ],
            options={
                "abstract": False,
            },
        ),
        migrations.CreateModel(
            name="Publisher",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                ("is_default", models.BooleanField(default=False)),
                ("sort_name", models.CharField(db_index=True, max_length=32)),
                ("name", models.CharField(default="No Publisher", max_length=32)),
            ],
            options={
                "unique_together": {("name", "is_default")},
            },
        ),
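        # Each CreateModel below repeats the same id / created_at / updated_at
        # boilerplate; only the name fields, foreign keys, and unique_together
        # constraints vary between the models.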
migrations.CreateModel( name="Series", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("is_default", models.BooleanField(default=False)), ("sort_name", models.CharField(db_index=True, max_length=32)), ("name", models.CharField(default="Default Series", max_length=32)), ("volume_count", models.PositiveSmallIntegerField(null=True)), ( "imprint", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.imprint" ), ), ( "publisher", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.publisher", ), ), ], options={ "unique_together": {("name", "imprint", "is_default")}, }, ), migrations.CreateModel( name="Volume", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("is_default", models.BooleanField(default=False)), ("sort_name", models.CharField(db_index=True, max_length=32)), ("name", models.CharField(default="", max_length=32)), ( "issue_count", models.DecimalField(decimal_places=2, max_digits=6, null=True), ), ( "imprint", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.imprint" ), ), ( "publisher", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.publisher", ), ), ( "series", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.series" ), ), ], options={ "unique_together": {("name", "series", "is_default")}, }, ), migrations.CreateModel( name="Team", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Tag", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="StoryArc", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="SeriesGroup", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Location", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", 
models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.AddField( model_name="imprint", name="publisher", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.publisher", ), ), migrations.CreateModel( name="Genre", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Folder", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ( "path", models.CharField( db_index=True, max_length=128, validators=[codex.models.validate_dir_exists], ), ), ("sort_name", models.CharField(max_length=32)), ( "library", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.library" ), ), ( "parent_folder", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.folder", ), ), ], options={ "unique_together": {("library", "path")}, }, ), migrations.CreateModel( name="CreditRole", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="CreditPerson", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Credit", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "person", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.creditperson", ), ), ( "role", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.creditrole", ), ), ], options={ "unique_together": {("person", "role")}, }, ), migrations.CreateModel( name="Comic", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("path", models.CharField(db_index=True, max_length=128)), ( "issue", models.DecimalField( db_index=True, decimal_places=2, default=Decimal("0.0"), max_digits=6, ), ), ("title", models.CharField(db_index=True, max_length=64, null=True)), ("year", models.PositiveSmallIntegerField(null=True)), ("month", models.PositiveSmallIntegerField(null=True)), ("day", models.PositiveSmallIntegerField(null=True)), ("summary", models.TextField(null=True)), ("notes", models.TextField(null=True)), ("description", models.TextField(null=True)), ( 
"critical_rating", models.CharField(db_index=True, max_length=32, null=True), ), ( "maturity_rating", models.CharField(db_index=True, max_length=32, null=True), ), ( "user_rating", models.CharField(db_index=True, max_length=32, null=True), ), ("country", models.CharField(db_index=True, max_length=32, null=True)), ("language", models.CharField(db_index=True, max_length=16, null=True)), ( "page_count", models.PositiveSmallIntegerField(db_index=True, default=0), ), ("cover_image", models.CharField(max_length=64, null=True)), ("read_ltr", models.BooleanField(default=True)), ("web", models.URLField(null=True)), ("format", models.CharField(max_length=16, null=True)), ("scan_info", models.CharField(max_length=32, null=True)), ("sort_name", models.CharField(db_index=True, max_length=32)), ("date", models.DateField(db_index=True, null=True)), ("decade", models.PositiveSmallIntegerField(db_index=True, null=True)), ("size", models.PositiveSmallIntegerField(db_index=True)), ("max_page", models.PositiveSmallIntegerField(default=0)), ("cover_path", models.CharField(max_length=32)), ("characters", models.ManyToManyField(to="codex.Character")), ("credits", models.ManyToManyField(to="codex.Credit")), ("folder", models.ManyToManyField(to="codex.Folder")), ("genres", models.ManyToManyField(to="codex.Genre")), ( "imprint", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.imprint" ), ), ( "library", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.library" ), ), ("locations", models.ManyToManyField(to="codex.Location")), ( "myself", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, related_name="comic", to="codex.comic", ), ), ( "parent_folder", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, related_name="comic_in", to="codex.folder", ), ), ( "publisher", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.publisher", ), ), ( "series", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.series" ), ), ("series_groups", models.ManyToManyField(to="codex.SeriesGroup")), ("story_arcs", models.ManyToManyField(to="codex.StoryArc")), ("tags", models.ManyToManyField(to="codex.Tag")), ("teams", models.ManyToManyField(to="codex.Team")), ( "volume", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.volume" ), ), ], options={ "unique_together": {("path", "volume", "year", "issue")}, }, ), migrations.AlterUniqueTogether( name="imprint", unique_together={("name", "publisher", "is_default")}, ), migrations.CreateModel( name="AdminFlag", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32)), ("on", models.BooleanField(default=True)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.AlterField( model_name="imprint", name="sort_name", field=models.CharField(db_index=True, max_length=32), ), migrations.CreateModel( name="UserBookmark", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "bookmark", models.PositiveSmallIntegerField(db_index=True, null=True), ), ("finished", models.BooleanField(db_index=True, default=False)), ( "fit_to", 
models.CharField( default=None, max_length=6, null=True, # Old code dependent validators removed in the future ), ), ("two_pages", models.BooleanField(default=None, null=True)), ( "comic", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.comic" ), ), ( "session", models.ForeignKey( null=True, on_delete=codex.models.cascade_if_user_null, to="sessions.session", ), ), ( "user", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ], options={ "unique_together": {("user", "session", "comic")}, }, ), ] ================================================ FILE: codex/migrations/0002_auto_20200826_0622.py ================================================ """Generated by Django 3.1 on 2020-08-26 06:22.""" from django.db import migrations class Migration(migrations.Migration): """Change libraries verbose name.""" dependencies = [ ("codex", "0001_init"), ] operations = [ migrations.AlterModelOptions( name="library", options={"verbose_name_plural": "libraries"}, ), ] ================================================ FILE: codex/migrations/0003_auto_20200831_2033.py ================================================ """Generated by Django 3.1 on 2020-08-31 20:33.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Credit roles can be none.""" dependencies = [ ("codex", "0002_auto_20200826_0622"), ] operations = [ migrations.AlterField( model_name="credit", name="role", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.creditrole", ), ), ] ================================================ FILE: codex/migrations/0004_failedimport.py ================================================ """Generated by Django 3.1.1 on 2020-09-14 22:15.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Keep track of failed imports.""" dependencies = [ ("codex", "0003_auto_20200831_2033"), ] operations = [ migrations.CreateModel( name="FailedImport", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("path", models.CharField(db_index=True, max_length=128)), ("reason", models.CharField(max_length=64)), ( "library", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.library" ), ), ], options={ "unique_together": {("library", "path")}, }, ), ] ================================================ FILE: codex/migrations/0005_auto_20200918_0146.py ================================================ """Generated by Django 3.1.1 on 2020-09-18 01:46.""" from django.db import migrations class Migration(migrations.Migration): """Update verbose names.""" dependencies = [ ("codex", "0004_failedimport"), ] operations = [ migrations.AlterModelOptions( name="comic", options={"verbose_name": "Issue"}, ), migrations.AlterModelOptions( name="series", options={"verbose_name_plural": "Series"}, ), ] ================================================ FILE: codex/migrations/0006_update_default_names_and_remove_duplicate_comics.py ================================================ """Generated by Django 3.2.9 on 2021-11-04 03:03.""" from types import MappingProxyType from django.db import migrations from django.db.models.functions import Now MODEL_NAMES = MappingProxyType( { "Series": "Default Series", "Imprint": "Main 
Imprint", "Publisher": "No Publisher", } ) NEW_DEFAULT_NAME = "" UPDATE_FIELDS = ("stat", "updated_at") def update_default_names(apps, _schema_editor) -> None: """Prepare for removing the is_default field.""" for model_name, default_name in MODEL_NAMES.items(): model = apps.get_model("codex", model_name) model.objects.filter(name=NEW_DEFAULT_NAME, is_default=False).update( name="UNKNOWN", updated_at=Now() ) model.objects.filter(name=default_name, is_default=True).update( name=NEW_DEFAULT_NAME, updated_at=Now() ) def remove_duplicate_comics(apps, _schema_editor) -> None: """Remove duplicate comics in preparation for looser unique constraints.""" model = apps.get_model("codex", "Comic") comics = model.objects.only("pk", "library__id", "path", "updated_at") unique = {} update_comics = {} delete_comics = set() now = Now() for comic in comics: dupe_id = (comic.library_id, comic.path) update_comic = unique.get(dupe_id) if update_comic: if update_comic.pk not in update_comics: update_comic.stat = None update_comic.updated_at = now update_comics[update_comic.pk] = update_comic delete_comics.add(comic.pk) else: unique[dupe_id] = comic if not delete_comics and not update_comics: return print() if delete_comics: print(f"Deleting {len(delete_comics)} duplicate comics from database.") model.objects.filter(pk__in=delete_comics).delete() if update_comics: print(f"Marking {len(update_comics)} comics to be updated by the next poll.") model.objects.bulk_update(update_comics.values(), fields=UPDATE_FIELDS) class Migration(migrations.Migration): """Change default names to ''.""" dependencies = [ ("codex", "0005_auto_20200918_0146"), ] operations = [ migrations.RunPython(update_default_names), migrations.RunPython(remove_duplicate_comics), ] ================================================ FILE: codex/migrations/0007_auto_20211210_1710.py ================================================ """Generated by Django 3.2.9 on 2021-12-11 01:10.""" import datetime import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Large migration for v0.7.0.""" dependencies = [ ("codex", "0006_update_default_names_and_remove_duplicate_comics"), ] operations = [ migrations.CreateModel( name="LatestVersion", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("version", models.CharField(max_length=32)), ], options={ "abstract": False, }, ), migrations.RenameField( model_name="comic", old_name="folder", new_name="folders", ), migrations.RenameField( model_name="library", old_name="enable_watch", new_name="events", ), migrations.RenameField( model_name="library", old_name="last_scan", new_name="last_poll", ), migrations.RenameField( model_name="library", old_name="enable_scan_cron", new_name="poll", ), migrations.RenameField( model_name="library", old_name="scan_in_progress", new_name="update_in_progress", ), migrations.RemoveField( model_name="failedimport", name="reason", ), migrations.RemoveField( model_name="library", name="scan_frequency", ), # This was changed from add name, remove title for django 4.1 tests migrations.AlterField( model_name="comic", name="title", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.RenameField(model_name="comic", old_name="title", new_name="name"), migrations.AddField( model_name="comic", name="stat", field=models.JSONField(null=True), ), 
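        # The AddField operations below give FailedImport and Folder the same
        # name / sort_name / stat bookkeeping fields that Comic carries.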
migrations.AddField( model_name="failedimport", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AddField( model_name="failedimport", name="parent_folder", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.folder", ), ), migrations.AddField( model_name="failedimport", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AddField( model_name="failedimport", name="stat", field=models.JSONField(null=True), ), migrations.AddField( model_name="folder", name="stat", field=models.JSONField(null=True), ), migrations.AddField( model_name="library", name="poll_every", field=models.DurationField(default=datetime.timedelta(seconds=3600)), ), migrations.AlterField( model_name="comic", name="size", field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name="comic", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="folder", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="folder", name="path", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="folder", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="imprint", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="imprint", name="publisher", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.publisher" ), ), migrations.AlterField( model_name="imprint", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="publisher", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="publisher", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="series", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="series", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="volume", name="name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="volume", name="sort_name", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterUniqueTogether( name="comic", unique_together={("library", "path")}, ), migrations.AlterUniqueTogether( name="imprint", unique_together={("name", "publisher")}, ), migrations.AlterUniqueTogether( name="publisher", unique_together={("name",)}, ), migrations.AlterUniqueTogether( name="series", unique_together={("name", "imprint")}, ), migrations.AlterUniqueTogether( name="volume", unique_together={("name", "series")}, ), migrations.RemoveField( model_name="comic", name="myself", ), migrations.RemoveField( model_name="imprint", name="is_default", ), migrations.RemoveField( model_name="publisher", name="is_default", ), migrations.RemoveField( model_name="series", name="is_default", ), migrations.RemoveField( model_name="volume", name="is_default", ), ] ================================================ FILE: codex/migrations/0008_alter_comic_created_at_alter_comic_format_and_more.py ================================================ """Generated by Django 
4.0 on 2021-12-17 04:36.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Indexes for filtered and sorted comic fields.""" dependencies = [ ("codex", "0007_auto_20211210_1710"), ] operations = [ migrations.AlterField( model_name="comic", name="created_at", field=models.DateTimeField(auto_now_add=True, db_index=True), ), migrations.AlterField( model_name="comic", name="format", field=models.CharField(db_index=True, max_length=16, null=True), ), migrations.AlterField( model_name="comic", name="parent_folder", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.folder", ), ), migrations.AlterField( model_name="comic", name="read_ltr", field=models.BooleanField(db_index=True, default=True), ), migrations.AlterField( model_name="comic", name="updated_at", field=models.DateTimeField(auto_now=True, db_index=True), ), migrations.AlterField( model_name="comic", name="year", field=models.PositiveSmallIntegerField(db_index=True, null=True), ), ] ================================================ FILE: codex/migrations/0009_alter_comic_parent_folder.py ================================================ """Generated by Django 4.0 on 2021-12-19 17:47.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Override related_name because it interferes with comic.folders.""" dependencies = [ ("codex", "0008_alter_comic_created_at_alter_comic_format_and_more"), ] operations = [ migrations.AlterField( model_name="comic", name="parent_folder", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, related_name="comic_in", to="codex.folder", ), ), ] ================================================ FILE: codex/migrations/0010_haystack.py ================================================ """Generated by Django 4.0.1 on 2022-01-16 05:31.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Haystack search engine.""" dependencies = [ ("codex", "0009_alter_comic_parent_folder"), ] operations = [ migrations.CreateModel( # Fake model. Not managed.
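            # managed=False in the options below means Django will never create
            # or migrate a table for this model.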
name="QueueJob", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ], options={ "db_tablespace": "temp", "managed": False, }, ), migrations.CreateModel( name="SearchQuery", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("text", models.CharField(db_index=True, max_length=256, unique=True)), ("used_at", models.DateTimeField(auto_now_add=True, db_index=True)), ], ), migrations.AlterModelOptions( name="latestversion", options={"get_latest_by": "updated_at"}, ), migrations.CreateModel( name="SearchResult", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("score", models.PositiveSmallIntegerField()), ( "comic", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.comic" ), ), ( "query", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.searchquery", ), ), ], ), migrations.AddIndex( model_name="searchresult", index=models.Index( fields=["comic", "query"], name="codex_searc_comic_i_dd09ab_idx" ), ), migrations.AlterUniqueTogether( name="searchresult", unique_together={("query", "comic")}, ), ] ================================================ FILE: codex/migrations/0011_library_groups_and_metadata_changes.py ================================================ """Generated by Django 4.0.1 on 2022-01-31 22:09 & tweaked by aj.""" from decimal import Decimal from django.db import migrations, models def critical_rating_to_decimal(apps, _schema_editor) -> None: """Migrate comics with charfield ratings to decimal if possible.""" comic_model = apps.get_model("codex", "comic") update_comics = [] comics_with_ratings = comic_model.objects.exclude( critical_rating__isnull=True, critical_rating="" ).only("pk", "critical_rating", "critical_rating_decimal") for comic in comics_with_ratings: try: comic.critical_rating_decimal = Decimal(comic.critical_rating) update_comics.append(comic) except Exception as err: reason = ( f"WARNING: comic {comic.pk} {comic.path} failed converting " f"critical_rating to Decimal {err}" ) print(reason) comic_model.objects.bulk_update(update_comics, ("critical_rating_decimal",)) class Migration(migrations.Migration): """Library group ACLS and metadata changes.""" dependencies = [ ("auth", "0012_alter_user_first_name_max_length"), ("codex", "0010_haystack"), ] operations = [ migrations.AddField( model_name="comic", name="community_rating", field=models.DecimalField( db_index=True, decimal_places=2, default=None, max_digits=4, null=True ), ), migrations.AddField( model_name="library", name="groups", field=models.ManyToManyField(blank=True, to="auth.Group"), ), migrations.AddField( model_name="comic", name="critical_rating_decimal", field=models.DecimalField( db_index=True, decimal_places=2, default=None, max_digits=4, null=True ), ), migrations.RunPython(critical_rating_to_decimal), migrations.AlterField( # Added in django 4.1 to remove index before drop model_name="comic", name="critical_rating", field=models.CharField(db_index=False, max_length=32, null=True), ), migrations.RemoveField(model_name="comic", name="critical_rating"), migrations.RenameField( model_name="comic", old_name="critical_rating_decimal", new_name="critical_rating", ), migrations.RenameField( model_name="comic", old_name="maturity_rating", new_name="age_rating", ), migrations.AlterField( # Added in django 4.1 to remove index before drop model_name="comic", name="user_rating", 
field=models.CharField(db_index=False, max_length=32, null=True), ), migrations.RemoveField( model_name="comic", name="user_rating", ), ] ================================================ FILE: codex/migrations/0012_rename_description_comic_comments.py ================================================ """Generated by Django 4.0.2 on 2022-02-24 20:58.""" from django.db import migrations class Migration(migrations.Migration): """Rename comic description to comic comments.""" dependencies = [ ("codex", "0011_library_groups_and_metadata_changes"), ] operations = [ migrations.RenameField( model_name="comic", old_name="description", new_name="comments", ), ] ================================================ FILE: codex/migrations/0013_int_issue_count_longer_charfields.py ================================================ """Generated by Django 4.0.2 on 2022-03-25 23:16.""" from decimal import Decimal from django.db import migrations, models def cast_issue_count(apps, _schema_editor) -> None: """Round issue counts to integer.""" volume_model = apps.get_model("codex", "volume") volumes = volume_model.objects.filter(issue_count_decimal__isnull=False).only( "name", "issue_count_decimal", "issue_count" ) update_volumes = [] for volume in volumes: try: volume.issue_count = round(volume.issue_count_decimal) update_volumes.append(volume) except Exception: reason = ( f"unable to cast volume {volume.name} " f"issue_count {volume.issue_count_decimal} to int" ) print(reason) volume_model.objects.bulk_update(update_volumes, ("issue_count",)) class Migration(migrations.Migration): """Larger valid fields.""" dependencies = [ ("codex", "0012_rename_description_comic_comments"), ] operations = [ migrations.RenameField( model_name="volume", old_name="issue_count", new_name="issue_count_decimal" ), migrations.AddField( model_name="volume", name="issue_count", field=models.PositiveSmallIntegerField(null=True), ), migrations.RunPython(cast_issue_count), migrations.RemoveField(model_name="volume", name="issue_count_decimal"), migrations.AlterField( model_name="adminflag", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="character", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="comic", name="community_rating", field=models.DecimalField( db_index=True, decimal_places=2, default=None, max_digits=5, null=True ), ), migrations.AlterField( model_name="comic", name="cover_image", field=models.CharField(max_length=256, null=True), ), migrations.AlterField( model_name="comic", name="cover_path", field=models.CharField(max_length=4095), ), migrations.AlterField( model_name="comic", name="critical_rating", field=models.DecimalField( db_index=True, decimal_places=2, default=None, max_digits=5, null=True ), ), migrations.AlterField( model_name="comic", name="issue", field=models.DecimalField( db_index=True, decimal_places=2, default=Decimal(0), max_digits=10 ), ), migrations.AlterField( model_name="comic", name="language", field=models.CharField(db_index=True, max_length=32, null=True), ), migrations.AlterField( model_name="comic", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="comic", name="path", field=models.CharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="comic", name="scan_info", field=models.CharField(max_length=128, null=True), ), migrations.AlterField( model_name="comic", name="sort_name",
field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="creditperson", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="creditrole", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="failedimport", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="failedimport", name="path", field=models.CharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="failedimport", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="folder", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="folder", name="path", field=models.CharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="folder", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="genre", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="imprint", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="imprint", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="location", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="publisher", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="publisher", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="series", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="series", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), migrations.AlterField( model_name="seriesgroup", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="storyarc", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="tag", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="team", name="name", field=models.CharField(db_index=True, max_length=64), ), migrations.AlterField( model_name="volume", name="name", field=models.CharField(db_index=True, default="", max_length=64), ), migrations.AlterField( model_name="volume", name="sort_name", field=models.CharField(db_index=True, default="", max_length=130), ), ] ================================================ FILE: codex/migrations/0014_pdf_issue_suffix_remove_cover_image_sort_name.py ================================================ """Generated by Django 4.0.4 on 2022-04-26 03:09.""" from pathlib import Path from django.db import migrations, models def add_library_folders(apps, _schema_editor) -> None: """Add library folders if they're missing.""" folder_model = apps.get_model("codex", "folder") top_folders = folder_model.objects.filter(parent_folder=None) # Create missing library folders top_folder_paths = top_folders.values_list("path", flat=True) library_model = apps.get_model("codex", "library") libraries_missing_top_folders = library_model.objects.exclude( path__in=top_folder_paths ) 
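    # Each library missing a top-level folder row gets one rooted at the
    # library path, named after the last path component.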
create_folders = [] for library in libraries_missing_top_folders: path = library.path name = Path(library.path).name folder = folder_model(library=library, path=path, name=name) create_folders.append(folder) if create_folders: print("\ncreating library folders...") new_folders = folder_model.objects.bulk_create(create_folders) for folder in new_folders: print(f"created library folder {folder.pk}: {folder.path}") # Update previously top folders to descend from library folders. library_paths = library_model.objects.all().values_list("path", flat=True) orphan_top_folders = top_folders.exclude(path__in=library_paths) update_folders = [] for folder in orphan_top_folders: for library_path in library_paths: if Path(folder.path).is_relative_to(library_path): old_parent = folder.parent_folder folder.parent_folder = folder_model.objects.get(path=library_path) update_folders.append(folder) print( "updating", folder.path, "parent from", old_parent, "to", folder.parent_folder.pk, ) break count = folder_model.objects.bulk_update(update_folders, ["parent_folder"]) print(f"updated {count} folders.") # Link comics to new folders comic_model = apps.get_model("codex", "comic") ThroughModel = comic_model.folders.through # noqa:N806 tms = [] for new_folder in new_folders: comic_pks = comic_model.objects.filter( path__startswith=new_folder.path ).values_list("pk", flat=True) for comic_pk in comic_pks: tm = ThroughModel(comic_id=comic_pk, folder_id=new_folder.pk) tms.append(tm) print(f"linking {len(tms)} comics to new folders...") objs = ThroughModel.objects.bulk_create(tms) if objs: print(f"linked {len(objs)} comics to new folders.") class Migration(migrations.Migration): """Remove cover_image and sort_name; add issue_suffix & file_format.""" dependencies = [ ("codex", "0013_int_issue_count_longer_charfields"), ] operations = [ migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="comic", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="failedimport", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="folder", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="imprint", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="publisher", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="series", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.AlterField( # Fixes django 4.1 bug removing fields model_name="volume", name="sort_name", field=models.CharField(db_index=False, max_length=32), ), migrations.RemoveField( model_name="comic", name="cover_image", ), migrations.RemoveField( model_name="comic", name="sort_name", ), migrations.RemoveField( model_name="failedimport", name="sort_name", ), migrations.RemoveField( model_name="folder", name="sort_name", ), migrations.RemoveField( model_name="imprint", name="sort_name", ), migrations.RemoveField( model_name="publisher", name="sort_name", ), migrations.RemoveField( model_name="series", name="sort_name", ), migrations.RemoveField( model_name="volume", name="sort_name", ), migrations.AddField( model_name="comic",
name="file_format", field=models.CharField( default="comic", max_length=5, # codex dependent validators removed in the future ), ), migrations.AddField( model_name="comic", name="issue_suffix", field=models.CharField(db_index=True, default="", max_length=16), ), migrations.AlterField( model_name="comic", name="issue", field=models.DecimalField( db_index=True, decimal_places=2, max_digits=10, null=True ), ), migrations.RunPython(add_library_folders), ] ================================================ FILE: codex/migrations/0015_link_comics_to_top_level_folders.py ================================================ """Fix no parent folder comics.""" from pathlib import Path from django.db import migrations def fix_no_parent_folder_comics(apps, _schema_editor) -> None: """Add a parent folder to orphan comics.""" folder_model = apps.get_model("codex", "folder") top_folders = folder_model.objects.filter(parent_folder=None).only("path") comic_model = apps.get_model("codex", "comic") orphan_comics = comic_model.objects.filter(parent_folder=None).only( "parent_folder", "path" ) update_comics = [] if orphan_comics: print(f"\nfixing {len(orphan_comics)} orphan comics.") for comic in orphan_comics: for folder in top_folders: if Path(comic.path).is_relative_to(folder.path): comic.parent_folder = folder print(f"linking {comic.path} to {folder.path}") update_comics.append(comic) break count = comic_model.objects.bulk_update(update_comics, ["parent_folder"]) if count: print(f"updated {count} comics.") class Migration(migrations.Migration): """Fix top level comics.""" dependencies = [("codex", "0014_pdf_issue_suffix_remove_cover_image_sort_name")] operations = [ migrations.RunPython(fix_no_parent_folder_comics), ] ================================================ FILE: codex/migrations/0016_remove_comic_cover_path_librarianstatus.py ================================================ """Generated by Django 4.0.6 on 2022-07-21 07:11.""" import os import shutil from pathlib import Path from django.db import migrations, models CONFIG_PATH = Path(os.environ.get("CODEX_CONFIG_DIR", Path.cwd() / "config")) OLD_COVER_CACHE = CONFIG_PATH / "static" CACHE_DIR = CONFIG_PATH / "cache" LATEST_VERSION_TO_TIMESTAMPS_MAP = {1: "codex_version", 2: "xapian_index_uuid"} def copy_versions_to_timestamp(apps, _schema_editor) -> None: """Convert old latest versions.""" lv_model = apps.get_model("codex", "latestversion") ts_model = apps.get_model("codex", "timestamp") lvs = lv_model.objects.filter(pk__in=LATEST_VERSION_TO_TIMESTAMPS_MAP.keys()).only( "version" ) for lv in lvs: name = LATEST_VERSION_TO_TIMESTAMPS_MAP.get(lv.pk) if not name: continue ts_model.objects.update_or_create(name=name, version=lv.version) print(f" Copied {name} version into Timestamps table.") def remove_old_caches(_apps, _schema_editor) -> None: """Clean up old cache locations.""" print("\n Removing old cover cache...") shutil.rmtree(OLD_COVER_CACHE, ignore_errors=True) if not CACHE_DIR.is_dir(): print(" COULD NOT FIND CACHE DIR!") return print(" Removing old default cache...") shutil.rmtree(CACHE_DIR, ignore_errors=True) CACHE_DIR.mkdir(parents=True, exist_ok=True) class Migration(migrations.Migration): """v0.11.0 migrations.""" dependencies = [ ("codex", "0015_link_comics_to_top_level_folders"), ] operations = [ migrations.RemoveField( model_name="library", name="schema_version", ), migrations.RemoveField( model_name="comic", name="cover_path", ), migrations.CreateModel( name="LibrarianStatus", fields=[ ( "id", models.AutoField( auto_created=True, 
primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("type", models.CharField(db_index=True, max_length=64)), ("name", models.CharField(db_index=True, max_length=256, null=True)), ("complete", models.PositiveSmallIntegerField(default=0)), ("total", models.PositiveSmallIntegerField(default=None, null=True)), ("active", models.BooleanField(default=False)), ], options={ "unique_together": {("type", "name")}, }, ), migrations.CreateModel( name="Timestamp", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=32, unique=True)), ("version", models.CharField(max_length=32, null=True, default=None)), ], options={ "get_latest_by": "updated_at", "abstract": False, }, ), migrations.RunPython(remove_old_caches), migrations.RunPython(copy_versions_to_timestamp), migrations.DeleteModel( name="LatestVersion", ), ] ================================================ FILE: codex/migrations/0017_alter_timestamp_options_alter_adminflag_name_and_more.py ================================================ """Generated by Django 4.1 on 2022-08-13 19:52.""" import os import shutil from pathlib import Path from django.db import migrations, models import codex.models CONFIG_PATH = Path(os.environ.get("CODEX_CONFIG_DIR", Path.cwd() / "config")) COVER_ROOT = CONFIG_PATH / "cache" / "covers" def clear_covers(_apps, _schema_editor) -> None: """Remove old covers.""" shutil.rmtree(COVER_ROOT, ignore_errors=True) def remove_null_librarian_statuses(apps, _schema_editor) -> None: """Remove all librarian statuses.""" ls_model = apps.get_model("codex", "librarianstatus") ls_model.objects.all().delete() class Migration(migrations.Migration): """Choices and max_length changes, mostly.""" dependencies = [ ("codex", "0016_remove_comic_cover_path_librarianstatus"), ] operations = [ migrations.RunPython(clear_covers), migrations.RunPython(remove_null_librarian_statuses), migrations.AlterModelOptions( name="timestamp", options={}, ), migrations.AlterField( model_name="adminflag", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="character", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="comic", name="file_format", field=models.CharField( choices=[("comic", "Comic"), ("pdf", "Pdf")], default="comic", max_length=5, ), ), migrations.AlterField( model_name="comic", name="format", field=models.CharField(db_index=True, max_length=32, null=True), ), migrations.AlterField( model_name="comic", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="creditperson", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="creditrole", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="failedimport", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="folder", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="genre", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( 
model_name="imprint", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="librarianstatus", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="librarianstatus", name="type", field=models.CharField(db_index=True, max_length=32), ), migrations.AlterField( model_name="librarianstatus", name="active", field=models.DateTimeField(default=None, null=True), ), migrations.AlterField( model_name="librarianstatus", name="total", field=models.PositiveSmallIntegerField(default=0), ), migrations.AddField( model_name="librarianstatus", name="preactive", field=models.BooleanField(default=False), ), migrations.AlterField( model_name="library", name="path", field=models.CharField( db_index=True, max_length=4095, unique=True, validators=[codex.models.validate_dir_exists], ), ), migrations.AlterField( model_name="location", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="publisher", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="series", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="seriesgroup", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="storyarc", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="tag", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="team", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="timestamp", name="name", field=models.CharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="volume", name="name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterUniqueTogether( name="timestamp", unique_together={("name",)}, ), ] ================================================ FILE: codex/migrations/0018_rename_userbookmark_bookmark.py ================================================ """Generated by Django 4.1 on 2022-08-24 01:49.""" from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): """Rename Bookmark table & use '' for CharField nulls.""" dependencies = [ ("sessions", "0001_initial"), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("codex", "0017_alter_timestamp_options_alter_adminflag_name_and_more"), ] operations = [ migrations.RenameField( model_name="UserBookmark", old_name="bookmark", new_name="page" ), migrations.RenameModel( old_name="UserBookmark", new_name="Bookmark", ), migrations.AlterField( model_name="bookmark", name="fit_to", field=models.CharField( blank=True, default="", max_length=6, # Code dependent validators removed in the future ), ), migrations.AlterField( model_name="comic", name="age_rating", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="comic", name="country", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="comic", name="format", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="comic", name="language", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AlterField( model_name="comic", name="scan_info", 
field=models.CharField(default="", max_length=128), ), migrations.AlterField( model_name="comic", name="web", field=models.URLField(default=""), ), migrations.AlterField( model_name="timestamp", name="version", field=models.CharField(default="", max_length=32), ), ] ================================================ FILE: codex/migrations/0019_delete_queuejob.py ================================================ """Generated by Django 4.1.4 on 2022-12-07 20:25.""" from django.db import migrations class Migration(migrations.Migration): """Delete fake model used in Django Admin.""" dependencies = [ ("codex", "0018_rename_userbookmark_bookmark"), ] operations = [ migrations.DeleteModel( name="QueueJob", ), ] ================================================ FILE: codex/migrations/0020_remove_search_tables.py ================================================ """Generated by Django 4.1.5 on 2023-01-11 08:12.""" from django.db import migrations def rename_search_timestamp(apps, _schema_editor) -> None: """Rename the search_index_uuid timestamp to its new name.""" ts_model = apps.get_model("codex", "timestamp") ts_model.objects.filter(name="xapian_index_uuid").update(name="search_index_uuid") class Migration(migrations.Migration): """Run migrations.""" dependencies = [ ("codex", "0019_delete_queuejob"), ] operations = [ migrations.RunPython(rename_search_timestamp), migrations.AlterUniqueTogether( name="searchresult", unique_together=None, ), migrations.RemoveField( model_name="searchresult", name="comic", ), migrations.RemoveField( model_name="searchresult", name="query", ), migrations.DeleteModel( name="SearchQuery", ), migrations.DeleteModel( name="SearchResult", ), migrations.AlterModelOptions( name="librarianstatus", options={"verbose_name_plural": "LibrarianStatuses"}, ), migrations.AlterModelOptions( name="library", options={"verbose_name_plural": "Libraries"}, ), ] ================================================ FILE: codex/migrations/0021_bookmark_fit_to_choices_read_in_reverse.py ================================================ """Generated by Django 4.1.7 on 2023-02-26 21:49.""" from django.db import migrations, models def ensure_fit_to_has_valid_choices(apps, _schema_editor) -> None: """Ensure fit_to has valid choices before adding constraint.""" bookmark_model = apps.get_model("codex", "bookmark") choices = {"", "SCREEN", "WIDTH", "HEIGHT", "ORIG"} bookmark_model.objects.exclude(fit_to__in=choices).update(fit_to="") class Migration(migrations.Migration): """Add bookmark choices to database.""" dependencies = [ ("codex", "0020_remove_search_tables"), ] operations = [ migrations.RunPython(ensure_fit_to_has_valid_choices), migrations.AlterField( model_name="bookmark", name="fit_to", field=models.CharField( blank=True, choices=[ ("SCREEN", "Screen"), ("WIDTH", "Width"), ("HEIGHT", "Height"), ("ORIG", "Orig"), ], default="", max_length=6, ), ), migrations.AddField( model_name="bookmark", name="read_in_reverse", field=models.BooleanField(default=None, null=True), ), ] ================================================ FILE: codex/migrations/0022_bookmark_vertical_useractive_null_statuses.py ================================================ """Generated by Django 4.1.7 on 2023-03-10 07:34.""" import django.db.models.deletion from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): """Migrate.""" dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ("codex", "0021_bookmark_fit_to_choices_read_in_reverse"), ] operations = 
[ migrations.AlterField( model_name="librarianstatus", name="complete", field=models.PositiveSmallIntegerField(default=None, null=True), ), migrations.AlterField( model_name="librarianstatus", name="total", field=models.PositiveSmallIntegerField(default=None, null=True), ), migrations.CreateModel( name="UserActive", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "user", models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, ), ), ], options={ "get_latest_by": "updated_at", "abstract": False, }, ), migrations.AddField( model_name="bookmark", name="vertical", field=models.BooleanField(default=None, null=True), ), ] ================================================ FILE: codex/migrations/0023_rename_credit_creator_and_more.py ================================================ """Generated by Django 4.1.7 on 2023-03-26 20:32.""" from pathlib import Path from django.db import connection, migrations, models NEW_FILE_TYPE_SUFFIXES = frozenset({"cbz", "cbr", "cbt", "pdf"}) def prepare_librarianstatus(apps, _schema_editor) -> None: """Delete all librarian statuses for re-creation.""" ls_model = apps.get_model("codex", "librarianstatus") ls_model.objects.all().delete() with connection.cursor() as cursor: sql = ( "UPDATE sqlite_sequence SET seq = 0" ' WHERE sqlite_sequence.name = "codex_librarianstatus"' ) cursor.execute(sql) def prepare_bookmarks(apps, _schema_editor) -> None: """Change bookmark fit_to to new values.""" bookmark_model = apps.get_model("codex", "bookmark") bookmarks = bookmark_model.objects.exclude(fit_to="") for bookmark in bookmarks: bookmark.fit_to = bookmark.fit_to[0] bookmark_model.objects.bulk_update(bookmarks, fields=["fit_to"]) def prepare_comics(apps, _schema_editor) -> None: """Prepare comics for field choice changes.""" comic_model = apps.get_model("codex", "comic") ## Comic.file_type comics = comic_model.objects.filter().only("path", "file_format") for comic in comics: if comic.file_format.lower() == "pdf": comic.file_format = "PDF" continue suffix = Path(comic.path).suffix[1:].lower() if comic.path else "" if suffix in NEW_FILE_TYPE_SUFFIXES: comic.file_format = suffix.upper() else: comic.file_format = "" comic_model.objects.bulk_update(comics, fields=["file_format"]) def prepare_adminflags(apps, _schema_editor) -> None: """Migrate update flag data.""" af_model = apps.get_model("codex", "adminflag") flags = af_model.objects.all() delete_pks = [] update_flags = [] for flag in flags: if "Folder" in flag.name: flag.name = "FV" elif "Registration" in flag.name: flag.name = "RG" elif "Users" in flag.name: flag.name = "NU" elif "Update" in flag.name: flag.name = "AU" elif "Search" in flag.name: flag.name = "SO" else: delete_pks.append(flag.pk) update_flags.append(flag) af_model.objects.filter(pk__in=delete_pks).delete() af_model.objects.bulk_update(update_flags, fields=("name",)) def prepare_timestamps(apps, _schema_editor) -> None: """Migrate timestamp data.""" ts_model = apps.get_model("codex", "timestamp") timestamps = ts_model.objects.all() update_timestamps = [] delete_pks = [] for ts in timestamps: if ts.name == "covers": ts.name = "CV" elif ts.name == "janitor": ts.name = "JA" elif ts.name == "codex_version": ts.name = "VR" elif ts.name == "search_index_uuid": ts.name = "SI" elif ts.name == "api_key": ts.name = "AP" else: delete_pks.append(ts.pk) continue 
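The prepare_* helpers in this migration follow the usual two-step data-migration pattern: first rewrite legacy values through the historical model from apps.get_model(), collecting rows to update and rows to drop, then let the schema operations narrow the column to the short keys. A minimal sketch of that pattern follows; the app label, model, field, and mapping here are hypothetical and not from this repository:

from django.db import migrations, models

# Hypothetical legacy-name -> two-letter-key mapping, for illustration only.
LEGACY_TO_KEY = {"Folder View": "FV", "Registration": "RG"}

def remap_keys(apps, _schema_editor):
    """Rewrite legacy values with the historical model before the column narrows."""
    model = apps.get_model("myapp", "flag")  # historical model, not the live one
    update_objs, delete_pks = [], []
    for obj in model.objects.all():
        key = LEGACY_TO_KEY.get(obj.key)
        if key:
            obj.key = key
            update_objs.append(obj)
        else:
            delete_pks.append(obj.pk)  # rows with unknown values are dropped
    model.objects.filter(pk__in=delete_pks).delete()
    model.objects.bulk_update(update_objs, ["key"])

class Migration(migrations.Migration):
    dependencies = [("myapp", "0001_initial")]
    operations = [
        # Data first, then the narrowing AlterField, mirroring the order above.
        migrations.RunPython(remap_keys, migrations.RunPython.noop),
        migrations.AlterField(
            model_name="flag",
            name="key",
            field=models.CharField(
                choices=[("FV", "Folder View"), ("RG", "Registration")],
                db_index=True,
                max_length=2,
            ),
        ),
    ]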
update_timestamps.append(ts) ts_model.objects.filter(pk__in=delete_pks).delete() ts_model.objects.bulk_update(update_timestamps, fields=("name",)) class Migration(migrations.Migration): """Prepare data and then migrate schema.""" dependencies = [ ("codex", "0022_bookmark_vertical_useractive_null_statuses"), ] operations = [ # PREPARE migrations.RunPython(prepare_librarianstatus), migrations.RunPython(prepare_adminflags), migrations.RunPython(prepare_timestamps), migrations.RunPython(prepare_bookmarks), migrations.RunPython(prepare_comics), # RENAME MODELS migrations.RenameModel( old_name="Credit", new_name="Creator", ), migrations.RenameModel( old_name="CreditPerson", new_name="CreatorPerson", ), migrations.RenameModel( old_name="CreditRole", new_name="CreatorRole", ), # ADMIN FLAG migrations.RenameField( model_name="adminflag", old_name="name", new_name="key", ), migrations.AlterField( model_name="adminflag", name="key", field=models.CharField( choices=[ ("FV", "Folder View"), ("RG", "Registration"), ("NU", "Non Users"), ("AU", "Auto Update"), ("SO", "Search Index Optimize"), ], db_index=True, max_length=2, ), ), # TIMESTAMP migrations.RenameField( model_name="timestamp", old_name="name", new_name="key", ), migrations.AlterField( model_name="timestamp", name="key", field=models.CharField( choices=[ ("CV", "Covers"), ("JA", "Janitor"), ("VR", "Codex Version"), ("SI", "Search Index UUID"), ("AP", "API Key"), ], db_index=True, max_length=2, ), ), # LIBRARIAN STATUS migrations.RenameField( model_name="librarianstatus", old_name="name", new_name="subtitle", ), migrations.AlterUniqueTogether( name="librarianstatus", unique_together=set(), ), migrations.RenameField( model_name="librarianstatus", old_name="type", new_name="status_type", ), migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CCD", "Purge Covers"), ("CFO", "Find Orphan"), ("IDM", "Dirs Moved"), ("IFM", "Files Moved"), ("ITR", "Aggregate Tags"), ("ITQ", "Query Missing Fks"), ("ITC", "Create Fks"), ("IDU", "Dirs Modified"), ("IFU", "Files Modified"), ("IFC", "Files Created"), ("IMQ", "Query M2M Fields"), ("IMC", "Link M2M Fields"), ("IDD", "Dirs Deleted"), ("IFD", "Files Deleted"), ("IFI", "Failed Imports"), ("JTD", "Cleanup Fk"), ("JCU", "Codex Update"), ("JCR", "Codex Restart"), ("JCS", "Codex Stop"), ("JDO", "Db Optimize"), ("JDB", "Db Backup"), ("JSD", "Cleanup Sessions"), ("SIX", "Search Index Clear"), ("SIU", "Search Index Update"), ("SID", "Search Index Remove"), ("SIM", "Search Index Merge"), ("WPO", "Poll"), ], db_index=True, max_length=3, ), preserve_default=False, ), migrations.AlterUniqueTogether( name="librarianstatus", unique_together={("status_type", "subtitle")}, ), # BOOKMARK migrations.AlterField( model_name="bookmark", name="fit_to", field=models.CharField( blank=True, choices=[ ("S", "Screen"), ("W", "Width"), ("H", "Height"), ("O", "Orig"), ], default="", max_length=1, ), ), # COMIC migrations.RenameField( model_name="comic", old_name="credits", new_name="creators", ), migrations.RenameField( model_name="comic", old_name="format", new_name="original_format", ), migrations.RenameField( model_name="comic", old_name="file_format", new_name="file_type" ), migrations.AlterField( model_name="comic", name="file_type", field=models.CharField( blank=True, choices=[ ("CBZ", "Cbz"), ("CBR", "Cbr"), ("CBT", "Cbt"), ("PDF", "Pdf"), ], default="", max_length=3, ), ), migrations.AlterField( model_name="comic", name="comments", 
field=models.TextField(default=""), ), migrations.AlterField( model_name="comic", name="notes", field=models.TextField(default=""), ), migrations.AlterField( model_name="comic", name="summary", field=models.TextField(default=""), ), ] ================================================ FILE: codex/migrations/0024_comic_gtin_comic_story_arc_number.py ================================================ """Generated by Django 4.2.1 on 2023-05-10 22:44.""" from django.db import migrations, models class Migration(migrations.Migration): """Add fields.""" dependencies = [ ("codex", "0023_rename_credit_creator_and_more"), ] operations = [ migrations.AddField( model_name="comic", name="gtin", field=models.CharField(db_index=True, default="", max_length=32), ), migrations.AddField( model_name="comic", name="story_arc_number", field=models.PositiveSmallIntegerField(db_index=True, null=True), ), ] ================================================ FILE: codex/migrations/0025_add_story_arc_number.py ================================================ """Generated by Django 4.2.1 on 2023-05-17 19:22.""" import django.db.models.deletion from django.db import migrations, models def _create_story_arc_numbers(apps, _schema_editor) -> None: comic_model = apps.get_model("codex", "comic") san_model = apps.get_model("codex", "StoryArcNumber") num_sans = 0 comics = comic_model.objects.exclude(story_arcs=None) print() print(f"Comics with story arcs: {comics.count()}") # Create a StoryArcNumber for each comic for comic in comics: sans = set() first_done = False for sa in comic.story_arcs.all(): number = None if first_done else comic.story_arc_number kwargs = {"story_arc": sa, "number": number} san, created = san_model.objects.get_or_create(defaults=kwargs, **kwargs) num_sans += int(created) sans.add(san) first_done = True comic.story_arc_numbers.add(*sans) comic.save() num_sas = apps.get_model("codex", "StoryArc").objects.count() print(f"Created {num_sans} StoryArcNumbers for {num_sas} StoryArcs") class Migration(migrations.Migration): """Run Migrations.""" dependencies = [ ("codex", "0024_comic_gtin_comic_story_arc_number"), ] operations = [ migrations.CreateModel( name="StoryArcNumber", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("number", models.PositiveIntegerField(default=None, null=True)), ( "story_arc", models.ForeignKey( db_index=True, on_delete=django.db.models.deletion.CASCADE, to="codex.storyarc", ), ), ], ), migrations.AlterUniqueTogether( name="storyarcnumber", unique_together={("story_arc", "number")}, ), migrations.AddField( model_name="comic", name="story_arc_numbers", field=models.ManyToManyField(to="codex.storyarcnumber"), ), migrations.RunPython(_create_story_arc_numbers), migrations.RemoveField( model_name="comic", name="story_arc_number", ), migrations.RemoveField( model_name="comic", name="story_arcs", ), ] ================================================ FILE: codex/migrations/0026_comicbox_1.py ================================================ """Generated by Django 4.2.4 on 2023-08-21 02:15.""" from types import MappingProxyType import django.db.models.deletion import pycountry from comicbox.identifiers.urns import parse_string_identifier from django.db import migrations, models MIGRATE_GTIN = True FIELDS_TABLE_MAP = MappingProxyType( { "age_rating": "AgeRating", "country": "Country", "language": "Language", "scan_info": 
"ScanInfo", "original_format": "OriginalFormat", } ) SEARCH_INDEX_UUID = "SI" _ALPHA_2_LEN = 2 def _migrate_comments(apps, _schema_editor) -> None: comic_model = apps.get_model("codex", "comic") comics = comic_model.objects.exclude(comments="").filter(summary="") for comic in comics: comic.summary = comic.comments comic_model.objects.bulk_update(comics, ("summary",)) def _migrate_reading_direction(apps, _schema_editor) -> None: comic_model = apps.get_model("codex", "comic") comics = comic_model.objects.filter(read_ltr=False) for comic in comics: comic.reading_direction = "rtl" comic_model.objects.bulk_update(comics, ("reading_direction",)) def _get_pycountry_alpha_2(val, lookup) -> str: if val and len(val) > _ALPHA_2_LEN and (obj := lookup.lookup(val)): val = obj.alpha_2 return val def _create_new_rows(comic_model, model, field_name, model_name) -> dict: """Create new model rows.""" null_filter = {f"{field_name}__in": (None, "")} names = ( comic_model.objects.exclude(**null_filter) .values_list(field_name, flat=True) .distinct() ) names = sorted(names) names_map = {} objs = [] for old_name in names: if field_name == "country": new_name = _get_pycountry_alpha_2(old_name, pycountry.countries) elif field_name == "language": new_name = _get_pycountry_alpha_2(old_name, pycountry.languages) else: new_name = old_name names_map[old_name] = new_name obj = model(name=new_name) objs.append(obj) if objs: model.objects.bulk_create(objs) print(f"\tCreated {len(objs)} {model_name}s") return names_map def _link_rows_to_comic(comic_model, model, field_name, model_name, names_map) -> None: """Link rows to comics.""" update_comics = [] new_field_name = f"new_{field_name}" for old_name, new_name in names_map.items(): named_filter = {field_name: old_name} comics = comic_model.objects.filter(**named_filter) for comic in comics: obj = model.objects.get(name=new_name) setattr(comic, new_field_name, obj) update_comics.append(comic) if update_comics: comic_model.objects.bulk_update(update_comics, [new_field_name]) print(f"\tLinked {len(update_comics)} comics to {len(names_map)} {model_name}s") def _migrate_fields_to_tables(apps, _schema_editor) -> None: comic_model = apps.get_model("codex", "comic") print() for field_name, model_name in FIELDS_TABLE_MAP.items(): model = apps.get_model("codex", model_name.lower()) names_map = _create_new_rows(comic_model, model, field_name, model_name) _link_rows_to_comic(comic_model, model, field_name, model_name, names_map) def _migrate_bookmark(apps, _schema_editor) -> None: bm_model = apps.get_model("codex", "bookmark") bookmarks = bm_model.objects.exclude(read_in_reverse=None, vertical=None) update_bookmarks = [] for bm in bookmarks: if bm.read_in_reverse and not bm.vertical: bm.reading_direction = "rtl" elif not bm.read_in_reverse and bm.vertical: bm.reading_direction = "ttb" bm.two_pages = None elif bm.read_in_reverse and bm.vertical: bm.reading_direction = "btt" bm.two_pages = None else: bm.reading_direction = "ltr" update_bookmarks.append(bm) if update_bookmarks: bm_model.objects.bulk_update( update_bookmarks, ["reading_direction", "two_pages"] ) print(f"\tMigrated {len(update_bookmarks)} Bookmarks to use reading_direction") def _migrate_gtin_to_ids_scan(comics) -> tuple[dict, dict]: identifiers = {} comic_identifiers = {} for comic in comics: try: nid, _, nss = parse_string_identifier(comic.gtin) except Exception: nid = None nss = None if not nss: continue if nid not in identifiers: identifiers[nid] = set() identifiers[nid].add(nss) comic_identifiers[comic] = (nid, nss) 
return identifiers, comic_identifiers def _migrate_gtin_to_ids_create_id_types(identifier_type_model, identifiers) -> None: create_identifier_types = [] for name in identifiers: obj = identifier_type_model(name=name) create_identifier_types.append(obj) if create_identifier_types: identifier_type_model.objects.bulk_create(create_identifier_types) print(f"\tCreated {len(create_identifier_types)} IdentifierTypes") def _migrate_gtin_to_ids_create_ids( identifier_model, identifier_type_model, identifiers ) -> None: create_identifiers = [] for name, nsses in identifiers.items(): identifier_type = identifier_type_model.objects.get(name=name) for nss in nsses: identifier = identifier_model(identifier_type=identifier_type, nss=nss) create_identifiers.append(identifier) if create_identifiers: identifier_model.objects.bulk_create(create_identifiers) print(f"\tCreated {len(create_identifiers)} Identifiers") def _migrate_gtin_to_ids_link_comics( comic_model, identifier_model, comics, comic_identifiers ) -> None: through_model = comic_model.identifiers.through tms = [] for comic in comics: name, nss = comic_identifiers[comic] identifier = identifier_model.objects.get(identifier_type__name=name, nss=nss) tm = through_model(comic_id=comic.pk, identifier_id=identifier.pk) tms.append(tm) if tms: through_model.objects.bulk_create(tms) print(f"\tLinked {len(comics)} Comics to Identifiers") def _migrate_gtin_to_identifiers(apps, _schema_editor) -> None: """Migrate gtin to identifiers table.""" if not MIGRATE_GTIN: return comic_model = apps.get_model("codex", "comic") comics = comic_model.objects.exclude(gtin="") identifiers, comic_identifiers = _migrate_gtin_to_ids_scan(comics) identifier_type_model = apps.get_model("codex", "identifiertype") _migrate_gtin_to_ids_create_id_types(identifier_type_model, identifiers) identifier_model = apps.get_model("codex", "identifier") _migrate_gtin_to_ids_create_ids( identifier_model, identifier_type_model, identifiers ) _migrate_gtin_to_ids_link_comics( comic_model, identifier_model, comics, comic_identifiers ) def _migrate_volume_name(apps, _schema_editor) -> None: volume_model = apps.get_model("codex", "volume") all_volumes = volume_model.objects.all() update_volumes = [] for volume in all_volumes: if volume.name == "": volume.name = None update_volumes.append(volume) if update_volumes: volume_model.objects.bulk_update(update_volumes, ["name"]) print(f"\tMigrated {len(update_volumes)} volumes with empty names.") def _clear_search_index_uuid(apps, _schema_editor) -> None: """Clear the search index uuid to force a search rebuild.""" ts_model = apps.get_model("codex", "timestamp") ts_model.objects.filter(key=SEARCH_INDEX_UUID).update(version="") class Migration(migrations.Migration): """Migrate database.""" dependencies = [ ("auth", "0012_alter_user_first_name_max_length"), ("codex", "0025_add_story_arc_number"), ] operations = [ migrations.RenameModel( old_name="Creator", new_name="Contributor", ), migrations.RenameModel( old_name="CreatorPerson", new_name="ContributorPerson", ), migrations.RenameModel( old_name="CreatorRole", new_name="ContributorRole", ), migrations.RenameField( model_name="comic", old_name="creators", new_name="contributors", ), migrations.RenameField( model_name="comic", old_name="issue", new_name="issue_number", ), migrations.RunPython( _migrate_comments, ), migrations.RemoveField( model_name="comic", name="comments", ), migrations.RemoveField( model_name="comic", name="max_page", ), migrations.AddField( model_name="comic", name="monochrome", 
field=models.BooleanField(db_index=True, default=False), ), migrations.AddField( model_name="comic", name="review", field=models.TextField(default=""), ), migrations.AddField( model_name="comic", name="tagger", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="comic", name="reading_direction", field=models.CharField( choices=[ ("ltr", "Ltr"), ("rtl", "Rtl"), ("ttb", "Ttb"), ("btt", "Btt"), ], db_index=True, default="ltr", max_length=3, ), ), migrations.RunPython( _migrate_reading_direction, ), migrations.RemoveField( model_name="comic", name="read_ltr", ), migrations.CreateModel( name="Tagger", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="ScanInfo", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="OriginalFormat", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Language", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Country", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="AgeRating", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "abstract": False, "unique_together": {("name",)}, }, ), migrations.AddField( model_name="comic", name="new_age_rating", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.agerating", ), ), migrations.AddField( model_name="comic", name="new_country", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.country", ), ), migrations.AddField( model_name="comic", name="new_language", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.language", ), ), migrations.AddField( model_name="comic", 
name="new_original_format", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.originalformat", ), ), migrations.AddField( model_name="comic", name="new_scan_info", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.scaninfo", ), ), migrations.AddField( model_name="comic", name="tagger", field=models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.tagger", ), ), migrations.RunPython( _migrate_fields_to_tables, ), migrations.RemoveField( model_name="comic", name="age_rating", ), migrations.RemoveField( model_name="comic", name="country", ), migrations.RemoveField( model_name="comic", name="language", ), migrations.RemoveField( model_name="comic", name="original_format", ), migrations.RemoveField( model_name="comic", name="scan_info", ), migrations.RenameField( model_name="comic", old_name="new_age_rating", new_name="age_rating", ), migrations.RenameField( model_name="comic", old_name="new_country", new_name="country", ), migrations.RenameField( model_name="comic", old_name="new_language", new_name="language", ), migrations.RenameField( model_name="comic", old_name="new_original_format", new_name="original_format", ), migrations.RenameField( model_name="comic", old_name="new_scan_info", new_name="scan_info", ), migrations.AddField( model_name="bookmark", name="reading_direction", field=models.CharField( blank=True, choices=[ ("ltr", "Ltr"), ("rtl", "Rtl"), ("ttb", "Ttb"), ("btt", "Btt"), ], default="", max_length=3, ), ), migrations.RunPython(_migrate_bookmark), migrations.RemoveField( model_name="bookmark", name="read_in_reverse", ), migrations.RemoveField( model_name="bookmark", name="vertical", ), migrations.AlterModelOptions( name="adminflag", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="agerating", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="bookmark", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="character", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="comic", options={"get_latest_by": "updated_at", "verbose_name": "Issue"}, ), migrations.AlterModelOptions( name="contributor", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="contributorperson", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="contributorrole", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="country", options={"get_latest_by": "updated_at", "verbose_name_plural": "Countries"}, ), migrations.AlterModelOptions( name="failedimport", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="folder", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="genre", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="imprint", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="language", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="library", options={"get_latest_by": "updated_at", "verbose_name_plural": "Libraries"}, ), migrations.AlterModelOptions( name="location", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="originalformat", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="publisher", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="scaninfo", 
options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="series", options={"get_latest_by": "updated_at", "verbose_name_plural": "Series"}, ), migrations.AlterModelOptions( name="seriesgroup", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="storyarc", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="storyarcnumber", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="tag", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="tagger", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="team", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="timestamp", options={"get_latest_by": "updated_at"}, ), migrations.AlterModelOptions( name="volume", options={"get_latest_by": "updated_at"}, ), migrations.CreateModel( name="IdentifierType", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "get_latest_by": "updated_at", "abstract": False, "unique_together": {("name",)}, }, ), migrations.CreateModel( name="Identifier", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("nss", models.CharField(max_length=128)), ("url", models.URLField(default="")), ( "identifier_type", models.ForeignKey( null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifiertype", ), ), ], options={ "get_latest_by": "updated_at", "abstract": False, "unique_together": {("identifier_type", "nss")}, }, ), migrations.AddField( model_name="comic", name="identifiers", field=models.ManyToManyField(to="codex.identifier"), ), migrations.RunPython(_migrate_gtin_to_identifiers), migrations.CreateModel( name="Story", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("name", models.CharField(db_index=True, max_length=128)), ], options={ "get_latest_by": "updated_at", "verbose_name_plural": "Stories", "abstract": False, "unique_together": {("name",)}, }, ), migrations.AddField( model_name="comic", name="stories", field=models.ManyToManyField(to="codex.story"), ), migrations.RemoveField( model_name="comic", name="gtin", ), migrations.RemoveField( model_name="comic", name="web", ), migrations.AlterField( model_name="volume", name="name", field=models.SmallIntegerField(db_index=True, default=None, null=True), ), migrations.RunPython(_migrate_volume_name), migrations.CreateModel( name="GroupAuth", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("exclude", models.BooleanField(db_index=True, default=False)), ( "group", models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, to="auth.group" ), ), ], options={ "get_latest_by": "updated_at", "abstract": False, "unique_together": {("group",)}, }, ), migrations.AlterModelOptions( name="librarianstatus", options={ 
"get_latest_by": "updated_at", "verbose_name_plural": "LibrarianStatuses", }, ), migrations.RunPython(_clear_search_index_uuid), ] ================================================ FILE: codex/migrations/0027_import_order_and_covers.py ================================================ """Generated by Django 5.0.6 on 2024-06-06 02:22.""" import os from contextlib import suppress from pathlib import Path import django.db.models.deletion from django.db import migrations, models from codex.models.util import get_sort_name CONFIG_PATH = Path(os.environ.get("CODEX_CONFIG_DIR", Path.cwd() / "config")) COVER_ROOT = CONFIG_PATH / "cache" / "covers" _GROUP_MODEL_NAMES = ( "publisher", "imprint", "series", "storyarc", "folder", "comic", ) def _set_sort_name(obj) -> None: """Create sort_name for model.""" obj.sort_name = get_sort_name(obj.name) def _generate_sort_name(apps, _schema_editor) -> None: """Update new sort_name field.""" for model_name in _GROUP_MODEL_NAMES: model = apps.get_model("codex", model_name) update_fields = ("sort_name",) only_fields = ("name", *update_fields) update_groups = [] objs = model.objects.only(*only_fields) for obj in objs: _set_sort_name(obj) update_groups.append(obj) if update_groups: model.objects.bulk_update(update_groups, update_fields) print(f"\tUpdated sort_name for {len(update_groups)} {model_name}s") def _remove_cover_symlinks(_apps, _schema_editor) -> None: """Remove old missing cover symlinks.""" # Handled programmatically now. count = 0 for dirpath_str, _, filenames in COVER_ROOT.walk(): dirpath = Path(dirpath_str) for filename in filenames: full_path = dirpath / filename if full_path.is_symlink(): with suppress(OSError): full_path.unlink(missing_ok=True) count += 1 if count: print(f"Removed {count} missing-cover symlinks from cover cache.") class Migration(migrations.Migration): """Migrate db.""" dependencies = [ ("codex", "0026_comicbox_1"), ] operations = [ migrations.AddField( model_name="comic", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="folder", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="imprint", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="library", name="covers_only", field=models.BooleanField(db_index=True, default=False), ), migrations.AddField( model_name="publisher", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="series", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AddField( model_name="storyarc", name="sort_name", field=models.CharField(db_index=True, default="", max_length=128), ), migrations.AlterField( model_name="adminflag", name="key", field=models.CharField( choices=[ ("FV", "Folder View"), ("RG", "Registration"), ("NU", "Non Users"), ("AU", "Auto Update"), ("SO", "Search Index Optimize"), ("IM", "Import Metadata"), ], db_index=True, max_length=2, ), ), migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CCD", "Purge Covers"), ("CFO", "Find Orphan"), ("IDM", "Dirs Moved"), ("IFM", "Files Moved"), ("ITR", "Aggregate Tags"), ("ITQ", "Query Missing Fks"), ("ITC", "Create Fks"), ("IDU", "Dirs Modified"), ("IFU", "Files Modified"), ("IFC", "Files Created"), ("IMQ", "Query M2M Fields"), ("IMC", "Link M2M 
Fields"), ("IDD", "Dirs Deleted"), ("IFD", "Files Deleted"), ("IFI", "Failed Imports"), ("ICQ", "Query Missing Covers"), ("ICM", "Covers Moved"), ("ICU", "Covers Modified"), ("ICC", "Covers Created"), ("ICD", "Covers Deleted"), ("ICL", "Covers Link"), ("IGU", "Group Update"), ("IAF", "Adopt Folders"), ("JTD", "Cleanup Fk"), ("JCU", "Codex Update"), ("JCR", "Codex Restart"), ("JCS", "Codex Stop"), ("JDO", "Db Optimize"), ("JDB", "Db Backup"), ("JSD", "Cleanup Sessions"), ("JCD", "Cleanup Covers"), ("SIX", "Search Index Clear"), ("SIU", "Search Index Update"), ("SID", "Search Index Remove"), ("SIM", "Search Index Merge"), ("WPO", "Poll"), ], db_index=True, max_length=3, ), ), migrations.AlterField( model_name="timestamp", name="key", field=models.CharField( choices=[ ("JA", "Janitor"), ("VR", "Codex Version"), ("SI", "Search Index UUID"), ("AP", "API Key"), ], db_index=True, max_length=2, ), ), migrations.CreateModel( name="CustomCover", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("path", models.CharField(db_index=True, max_length=4095)), ("stat", models.JSONField(null=True)), ( "group", models.CharField( choices=[ ("p", "P"), ("i", "I"), ("s", "S"), ("a", "A"), ("f", "F"), ], db_index=True, max_length=1, ), ), ( "sort_name", models.CharField(db_index=True, default="", max_length=128), ), ( "library", models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, to="codex.library" ), ), ], options={ "get_latest_by": "updated_at", "abstract": False, "unique_together": {("library", "path")}, }, ), migrations.AddField( model_name="folder", name="custom_cover", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="codex.customcover", ), ), migrations.AddField( model_name="imprint", name="custom_cover", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="codex.customcover", ), ), migrations.AddField( model_name="publisher", name="custom_cover", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="codex.customcover", ), ), migrations.AddField( model_name="series", name="custom_cover", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="codex.customcover", ), ), migrations.AddField( model_name="storyarc", name="custom_cover", field=models.ForeignKey( default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to="codex.customcover", ), ), migrations.RunPython(_generate_sort_name), migrations.RunPython(_remove_cover_symlinks), ] ================================================ FILE: codex/migrations/0028_telemeter.py ================================================ """Generated by Django 5.0.6 on 2024-07-02 21:01.""" from django.db import migrations, models class Migration(migrations.Migration): """Migrate db.""" dependencies = [("codex", "0027_import_order_and_covers")] operations = [ migrations.AlterField( model_name="adminflag", name="key", field=models.CharField( choices=[ ("FV", "Folder View"), ("RG", "Registration"), ("NU", "Non Users"), ("AU", "Auto Update"), ("SO", "Search Index Optimize"), ("IM", "Import Metadata"), ("ST", "Send Telemetry"), ], db_index=True, max_length=2, ), ), migrations.AlterField( model_name="timestamp", name="key", field=models.CharField( choices=[ ("JA", "Janitor"), ("VR", 
"Codex Version"), ("SI", "Search Index UUID"), ("AP", "API Key"), ("TS", "Telemeter Sent"), ], db_index=True, max_length=2, ), ), ] ================================================ FILE: codex/migrations/0029_comicfts.py ================================================ """Generated by Django 5.0.8 on 2024-08-07 22:07.""" import django.db.models.deletion from django.db import migrations, models class Migration(migrations.Migration): """Run migrations.""" dependencies = [ ("codex", "0028_telemeter"), ] operations = [ migrations.SeparateDatabaseAndState( state_operations=[ migrations.CreateModel( name="ComicFTS", fields=[ ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "comic", models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to="codex.comic", ), ), ("body", models.TextField()), ], options={ "get_latest_by": "updated_at", "managed": False, }, ), ], database_operations=[ migrations.RunSQL( sql=( "CREATE VIRTUAL TABLE codex_comicfts USING fts5(" "comic_id UNINDEXED, created_at UNINDEXED, " "updated_at UNINDEXED, " "publisher, imprint, series, volume, issue, name, age_rating, " "country, language, " "notes, original_format, review, scan_info, summary, " "tagger, " "characters, contributors, genres," "locations, roles, series_groups, stories, " "story_arcs, tags, teams, " "reading_direction, file_type)" ), reverse_sql="DROP TABLE IF EXISTS codex_comicfts", ), ], ) ] ================================================ FILE: codex/migrations/0030_nocase_collation_day_month_indexes_status_types.py ================================================ """Generated by Django 5.1.1 on 2024-09-12 00:13.""" from django.db import migrations, models class Migration(migrations.Migration): """Migrate db.""" dependencies = [ ("codex", "0029_comicfts"), ] operations = [ migrations.AlterUniqueTogether( name="groupauth", unique_together=set(), ), migrations.AlterField( model_name="adminflag", name="key", field=models.CharField( choices=[ ("FV", "Folder View"), ("RG", "Registration"), ("NU", "Non Users"), ("AU", "Auto Update"), ("IM", "Import Metadata"), ("ST", "Send Telemetry"), ], db_index=True, max_length=2, ), ), migrations.AlterField( model_name="comic", name="day", field=models.PositiveSmallIntegerField(db_index=True, null=True), ), migrations.AlterField( model_name="comic", name="file_type", field=models.CharField( blank=True, choices=[ ("CBZ", "Cbz"), ("CBR", "Cbr"), ("CBT", "Cbt"), ("PDF", "Pdf"), ], db_collation="nocase", db_index=True, default="", max_length=3, ), ), migrations.AlterField( model_name="comic", name="issue_suffix", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=16 ), ), migrations.AlterField( model_name="comic", name="month", field=models.PositiveSmallIntegerField(db_index=True, null=True), ), migrations.AlterField( model_name="comic", name="notes", field=models.TextField(db_collation="nocase", default=""), ), migrations.AlterField( model_name="comic", name="reading_direction", field=models.CharField( choices=[ ("ltr", "Ltr"), ("rtl", "Rtl"), ("ttb", "Ttb"), ("btt", "Btt"), ], db_collation="nocase", db_index=True, default="ltr", max_length=3, ), ), migrations.AlterField( model_name="comic", name="review", field=models.TextField(db_collation="nocase", default=""), ), migrations.AlterField( model_name="comic", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( 
model_name="comic", name="summary", field=models.TextField(db_collation="nocase", default=""), ), migrations.AlterField( model_name="customcover", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="folder", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="imprint", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CCD", "Purge Covers"), ("CFO", "Find Orphan"), ("IDM", "Dirs Moved"), ("IFM", "Files Moved"), ("ITR", "Aggregate Tags"), ("ITQ", "Query Missing Fks"), ("ITC", "Create Fks"), ("IDU", "Dirs Modified"), ("IFU", "Files Modified"), ("IFC", "Files Created"), ("IMQ", "Query M2M Fields"), ("IMC", "Link M2M Fields"), ("IDD", "Dirs Deleted"), ("IFD", "Files Deleted"), ("IFI", "Failed Imports"), ("ICQ", "Query Missing Covers"), ("ICM", "Covers Moved"), ("ICU", "Covers Modified"), ("ICC", "Covers Created"), ("ICD", "Covers Deleted"), ("ICL", "Covers Link"), ("IGU", "Group Update"), ("IAF", "Adopt Folders"), ("JTD", "Cleanup Fk"), ("JLV", "Codex Latest Version"), ("JCU", "Codex Update"), ("JCR", "Codex Restart"), ("JCS", "Codex Stop"), ("JDO", "Db Optimize"), ("JDB", "Db Backup"), ("JSD", "Cleanup Sessions"), ("JCD", "Cleanup Covers"), ("JCB", "Cleanup Bookmarks"), ("JIF", "Integrity Fk"), ("JIC", "Integrity Check"), ("JFC", "Fts Integrity Check"), ("JFR", "Fts Rebuild"), ("SIX", "Search Index Clear"), ("SIU", "Search Index Update"), ("SIC", "Search Index Create"), ("SID", "Search Index Remove"), ("SIO", "Search Index Optimize"), ("WPO", "Poll"), ], db_index=True, max_length=3, ), ), migrations.AlterField( model_name="publisher", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="series", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="storyarc", name="sort_name", field=models.CharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), ] ================================================ FILE: codex/migrations/0031_adminflag_banner.py ================================================ """Generated by Django 5.1.2 on 2024-10-22 03:09.""" from django.db import migrations, models class Migration(migrations.Migration): """Migrate db.""" dependencies = [ ("codex", "0030_nocase_collation_day_month_indexes_status_types"), ] operations = [ migrations.AddField( model_name="adminflag", name="value", field=models.CharField(blank=True, default="", max_length=128), ), migrations.AlterField( model_name="adminflag", name="key", field=models.CharField( choices=[ ("FV", "Folder View"), ("RG", "Registration"), ("NU", "Non Users"), ("AU", "Auto Update"), ("IM", "Import Metadata"), ("ST", "Send Telemetry"), ("BT", "Banner Text"), ], db_index=True, max_length=2, ), ), migrations.AlterField( model_name="timestamp", name="key", field=models.CharField( choices=[ ("AP", "API Key"), ("VR", "Codex Version"), ("JA", "Janitor"), ("TS", "Telemeter Sent"), ], db_index=True, max_length=2, ), ), ] ================================================ FILE: codex/migrations/0032_alter_librarianstatus_preactive.py 
================================================ """Generated by Django 5.1.3 on 2024-11-14 21:50.""" from django.db import migrations, models class Migration(migrations.Migration): """Migrate db.""" dependencies = [ ("codex", "0031_adminflag_banner"), ] operations = [ migrations.AlterField( model_name="librarianstatus", name="preactive", field=models.DateTimeField(default=None, null=True), ), ] ================================================ FILE: codex/migrations/0033_alter_librarianstatus_status_type.py ================================================ """Generated by Django 5.1.4 on 2024-12-21 03:50.""" from django.db import migrations, models class Migration(migrations.Migration): """Migrate DB.""" dependencies = [ ("codex", "0032_alter_librarianstatus_preactive"), ] operations = [ migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CCD", "Purge Covers"), ("CFO", "Find Orphan"), ("IAF", "Adopt Folders"), ("ICC", "Covers Created"), ("ICD", "Covers Deleted"), ("ICL", "Covers Link"), ("ICM", "Covers Moved"), ("ICQ", "Query Missing Covers"), ("ICU", "Covers Modified"), ("IDD", "Dirs Deleted"), ("IDM", "Dirs Moved"), ("IDU", "Dirs Modified"), ("IFC", "Files Created"), ("IFD", "Files Deleted"), ("IFI", "Failed Imports"), ("IFM", "Files Moved"), ("IFU", "Files Modified"), ("IGU", "Group Update"), ("IMC", "Link M2M Fields"), ("IMQ", "Query M2M Fields"), ("ITC", "Create Fks"), ("ITQ", "Query Missing Fks"), ("ITR", "Aggregate Tags"), ("JCB", "Cleanup Bookmarks"), ("JCD", "Cleanup Covers"), ("JCR", "Codex Restart"), ("JCS", "Codex Stop"), ("JCU", "Codex Update"), ("JDB", "Db Backup"), ("JDO", "Db Optimize"), ("JFC", "Fts Integrity Check"), ("JFR", "Fts Rebuild"), ("JIC", "Integrity Check"), ("JIF", "Integrity Fk"), ("JLV", "Codex Latest Version"), ("JSD", "Cleanup Sessions"), ("JTD", "Cleanup Fk"), ("SIC", "Search Index Create"), ("SID", "Search Index Remove"), ("SIO", "Search Index Optimize"), ("SIU", "Search Index Update"), ("SIX", "Search Index Clear"), ("WPO", "Poll"), ], db_index=True, max_length=3, ), ), ] ================================================ FILE: codex/migrations/0034_comicbox2.py ================================================ """Generated by Django 5.2.4 on 2025-07-21 15:05.""" import django.db.models.deletion from django.db import migrations, models import codex.models.fields import codex.models.library class Migration(migrations.Migration): """Migrate db.""" dependencies = [ ("codex", "0033_alter_librarianstatus_status_type"), ] operations = [ migrations.AlterField( model_name="agerating", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="character", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="comic", name="community_rating", field=codex.models.fields.CoercingDecimalField( db_index=True, decimal_places=2, default=None, max_digits=5, null=True ), ), migrations.AlterField( model_name="comic", name="critical_rating", field=codex.models.fields.CoercingDecimalField( db_index=True, decimal_places=2, default=None, max_digits=5, null=True ), ), migrations.AlterField( model_name="comic", name="day", field=codex.models.fields.CoercingPositiveSmallIntegerField( db_index=True, null=True ), ), migrations.AlterField( model_name="comic", name="decade", field=codex.models.fields.CoercingPositiveSmallIntegerField( db_index=True, null=True ), ), 
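This migration swaps many columns over to CleaningCharField and the Coercing* fields from codex.models.fields, a module this extract does not include. As a rough illustration of what a coercing field can look like (a hypothetical sketch, not codex's actual implementation), a DecimalField subclass can normalize junk metadata in to_python() instead of erroring:

from decimal import Decimal, DecimalException
from django.db import models

class CoercingDecimalField(models.DecimalField):
    """Illustrative sketch only: coerce messy metadata values instead of raising."""

    def to_python(self, value):
        if value in (None, ""):
            return None
        try:
            return super().to_python(value)
        except Exception:
            # Keep digits, sign, and decimal point; give up with None otherwise.
            cleaned = "".join(c for c in str(value) if c.isdigit() or c in ".-")
            try:
                return Decimal(cleaned)
            except DecimalException:
                return None

In this sketch only the Python-side conversion changes; the column definition itself is the same as a plain DecimalField.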
migrations.AlterField( model_name="comic", name="file_type", field=codex.models.fields.CleaningCharField( blank=True, choices=[ ("CBZ", "Cbz"), ("CBR", "Cbr"), ("CBT", "Cbt"), ("PDF", "Pdf"), ], db_collation="nocase", db_index=True, default="", max_length=3, ), ), migrations.AlterField( model_name="comic", name="issue_number", field=codex.models.fields.CoercingDecimalField( db_index=True, decimal_places=2, max_digits=10, null=True ), ), migrations.AlterField( model_name="comic", name="issue_suffix", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=16 ), ), migrations.AlterField( model_name="comic", name="month", field=codex.models.fields.CoercingPositiveSmallIntegerField( db_index=True, null=True ), ), migrations.AlterField( model_name="comic", name="name", field=codex.models.fields.CleaningCharField( db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="comic", name="notes", field=codex.models.fields.CleaningTextField( db_collation="nocase", default="" ), ), migrations.AlterField( model_name="comic", name="page_count", field=codex.models.fields.CoercingPositiveSmallIntegerField( db_index=True, default=0 ), ), migrations.AlterField( model_name="comic", name="path", field=codex.models.fields.CleaningCharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="comic", name="reading_direction", field=codex.models.fields.CleaningCharField( choices=[ ("ltr", "Ltr"), ("rtl", "Rtl"), ("ttb", "Ttb"), ("btt", "Btt"), ], db_collation="nocase", db_index=True, default="ltr", max_length=3, ), ), migrations.AlterField( model_name="comic", name="review", field=codex.models.fields.CleaningTextField( db_collation="nocase", default="" ), ), migrations.AlterField( model_name="comic", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="comic", name="summary", field=codex.models.fields.CleaningCharField( db_collation="nocase", default="" ), ), migrations.AlterField( model_name="comic", name="year", field=codex.models.fields.CoercingPositiveSmallIntegerField( db_index=True, null=True ), ), migrations.AlterField( model_name="contributorperson", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="contributorrole", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="country", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="customcover", name="path", field=codex.models.fields.CleaningCharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="failedimport", name="path", field=codex.models.fields.CleaningCharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="folder", name="name", field=codex.models.fields.CleaningCharField( db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="folder", name="path", field=codex.models.fields.CleaningCharField(db_index=True, max_length=4095), ), migrations.AlterField( model_name="folder", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="genre", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), 
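A batch of AlterField operations this large is worth auditing before upgrading a live database; Django's standard sqlmigrate command prints the SQL a named migration will run. For example, from a shell with settings configured:

import io
from django.core.management import call_command

# Run inside a configured Django environment (e.g. `manage.py shell`).
buf = io.StringIO()
call_command("sqlmigrate", "codex", "0034", stdout=buf)
print(buf.getvalue())  # the exact SQL this migration will execute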
migrations.AlterField( model_name="identifier", name="nss", field=codex.models.fields.CleaningCharField(max_length=128), ), migrations.AlterField( model_name="identifiertype", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="imprint", name="name", field=codex.models.fields.CleaningCharField( db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="imprint", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="language", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="library", name="path", field=codex.models.fields.CleaningCharField( db_index=True, max_length=4095, unique=True, validators=[codex.models.library.validate_dir_exists], ), ), migrations.AlterField( model_name="location", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="originalformat", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="publisher", name="name", field=codex.models.fields.CleaningCharField( db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="publisher", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="scaninfo", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="series", name="name", field=codex.models.fields.CleaningCharField( db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="series", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="series", name="volume_count", field=codex.models.fields.CoercingPositiveSmallIntegerField(null=True), ), migrations.AlterField( model_name="seriesgroup", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="story", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="storyarc", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="storyarc", name="sort_name", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="storyarcnumber", name="number", field=codex.models.fields.CoercingPositiveSmallIntegerField( default=None, null=True ), ), migrations.AlterField( model_name="tag", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="tagger", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="team", name="name", field=codex.models.fields.CleaningCharField(db_index=True, max_length=128), ), migrations.AlterField( model_name="volume", name="issue_count", field=codex.models.fields.CoercingPositiveSmallIntegerField(null=True), ), migrations.AlterField( model_name="volume", name="name", 
        migrations.AlterField(
            model_name="volume",
            name="name",
            field=codex.models.fields.CoercingPositiveSmallIntegerField(
                db_index=True, default=None, null=True
            ),
        ),
        migrations.RenameModel(old_name="Contributor", new_name="Credit"),
        migrations.RenameModel(old_name="ContributorPerson", new_name="CreditPerson"),
        migrations.RenameModel(old_name="ContributorRole", new_name="CreditRole"),
        migrations.RenameField(
            model_name="comic", old_name="contributors", new_name="credits"
        ),
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL(
                    sql="DROP TABLE IF EXISTS codex_comicfts;",
                ),
                migrations.RunSQL(
                    sql="CREATE VIRTUAL TABLE codex_comicfts USING fts5(comic_id UNINDEXED, created_at UNINDEXED, updated_at UNINDEXED, publisher, imprint, series, issue, name, collection_title, age_rating, country, language, notes, original_format, review, scan_info, summary, tagger, characters, credits, genres, locations, roles, series_groups, stories, story_arcs, tags, teams, reading_direction, file_type, universes, identifiers, sources)",
                    reverse_sql="DROP TABLE IF EXISTS codex_comicfts",
                ),
            ],
            state_operations=[
                migrations.DeleteModel(name="ComicFTS"),
                migrations.CreateModel(
                    name="ComicFTS",
                    fields=[
                        ("created_at", models.DateTimeField(auto_now_add=True)),
                        ("updated_at", models.DateTimeField(auto_now=True)),
                        (
                            "comic",
                            models.OneToOneField(
                                on_delete=django.db.models.deletion.CASCADE,
                                primary_key=True,
                                serialize=False,
                                to="codex.comic",
                            ),
                        ),
                        ("body", models.TextField()),
                    ],
                    options={"get_latest_by": "updated_at", "managed": False},
                ),
            ],
        ),
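        # The fts5 virtual table lives outside Django's schema control:
        # raw SQL drops and recreates it, while the state_operations keep the
        # ComicFTS model definition in sync without touching the database.
        # A minimal sketch of how the table can then be queried through the
        # custom "match" lookup registered in codex/models/functions.py
        # (illustrative query string only, not taken from this repo):
        #
        #   ComicFTS.objects.filter(comic__match="batman")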
        migrations.AddField(
            model_name="comic",
            name="metadata_mtime",
            field=models.DateTimeField(null=True),
        ),
        migrations.AddField(
            model_name="comic",
            name="main_character",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="main_character_in_comics",
                to="codex.character",
            ),
        ),
        migrations.AddField(
            model_name="comic",
            name="main_team",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.CASCADE,
                related_name="main_team_in_comics",
                to="codex.character",
            ),
        ),
        migrations.CreateModel(
            name="Universe",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("created_at", models.DateTimeField(auto_now_add=True)),
                ("updated_at", models.DateTimeField(auto_now=True)),
                (
                    "name",
                    codex.models.fields.CleaningCharField(
                        db_index=True, max_length=128
                    ),
                ),
                ("designation", codex.models.fields.CleaningCharField(max_length=128)),
                (
                    "identifier",
                    models.ForeignKey(
                        null=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        to="codex.identifier",
                    ),
                ),
            ],
            options={
                "get_latest_by": "updated_at",
                "abstract": False,
                "unique_together": {("name",)},
            },
        ),
        migrations.AddField(
            model_name="comic",
            name="universes",
            field=models.ManyToManyField(to="codex.universe"),
        ),
        migrations.RemoveField(
            model_name="comic",
            name="community_rating",
        ),
        migrations.AlterUniqueTogether(name="character", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="creditperson", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="location", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="story", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="storyarc", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="tag", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="team", unique_together={("name",)}),
        migrations.AddField(
            model_name="character", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="creditperson", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="imprint", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="location", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="publisher", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="series", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="story", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="storyarc", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="tag", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="team", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.RenameModel(old_name="IdentifierType", new_name="IdentifierSource"),
        migrations.RenameField(
            model_name="identifier", old_name="nss", new_name="key"
        ),
        migrations.RenameField(
            model_name="identifier", old_name="identifier_type", new_name="source"
        ),
        migrations.AlterUniqueTogether(name="genre", unique_together={("name",)}),
        migrations.AlterUniqueTogether(name="identifier", unique_together=set()),
        migrations.AddField(
            model_name="genre", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="creditrole", name="identifier",
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.identifier"),
        ),
        migrations.AddField(
            model_name="identifier",
            name="id_type",
            field=models.CharField(
                choices=[
                    ("storyarc", "Arc"), ("character", "Character"),
                    ("genre", "Genre"), ("imprint", "Imprint"),
                    ("comic", "Issue"), ("location", "Location"),
                    ("publisher", "Publisher"), ("series", "Series"),
                    ("story", "Story"), ("tag", "Tag"),
                    ("team", "Team"), ("universe", "Universe"),
                    ("creditrole", "Role"), ("creditperson", "Creator"),
                ],
                db_index=True,
                default="comic",
                max_length=16,
            ),
            preserve_default=False,
        ),
        migrations.AlterUniqueTogether(
            name="identifier", unique_together={("source", "id_type", "key")}
        ),
        migrations.AlterUniqueTogether(name="volume", unique_together=set()),
        migrations.AddField(
            model_name="volume",
            name="number_to",
            field=codex.models.fields.CoercingPositiveSmallIntegerField(
                db_index=True, default=None, null=True
            ),
        ),
        migrations.AlterUniqueTogether(
            name="imprint", unique_together={("publisher", "name")}
        ),
        migrations.AlterUniqueTogether(
            name="series", unique_together={("imprint", "name")}
        ),
        migrations.AlterUniqueTogether(
            name="volume", unique_together={("series", "name", "number_to")}
        ),
name="key", field=models.CharField( choices=[ ("AU", "Auto Update"), ("BT", "Banner Text"), ("FV", "Folder View"), ("IM", "Import Metadata"), ("LI", "Lazy Import Metadata"), ("NU", "Non Users"), ("RG", "Registration"), ("ST", "Send Telemetry"), ], db_index=True, max_length=2, ), ), migrations.AddField( model_name="comic", name="collection_title", field=codex.models.fields.CleaningCharField( db_collation="nocase", db_index=True, default="", max_length=128 ), ), migrations.AlterField( model_name="comic", name="file_type", field=codex.models.fields.CleaningCharField( blank=True, choices=[ ("CBR", "Cbr"), ("CBZ", "Cbz"), ("CBT", "Cbt"), ("CB7", "Cb7"), ("PDF", "Pdf"), ], db_collation="nocase", db_index=True, default="", max_length=3, ), ), migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CFO", "Find Orphan Covers"), ("CRC", "Remove Covers"), ("IAT", "Aggregate Tags From Comics"), ("ICC", "Create Comics"), ("ICT", "Create Tags"), ("ICV", "Create Custom Covers"), ("IFC", "Mark Failed Failed Imports"), ("IFD", "Clean Up Failed Imports"), ("IFQ", "Query Failed Imports"), ("IFU", "Update Failed Imports"), ("IGU", "Update Timestamps For Browser Groups"), ("ILT", "Link Tags"), ("ILV", "Link Custom Covers"), ("IQC", "Query Comics"), ("IQL", "Query Tag Links"), ("IQT", "Query Missing Tags"), ("IQV", "Query Missing Custom Covers"), ("IRC", "Remove Comics"), ("IRF", "Remove Folders"), ("IRT", "Read Tags From Comics"), ("IRV", "Remove Custom Covers"), ("ISC", "Create Search Index Entries"), ("ISU", "Update Search Index Entries"), ("IUC", "Update Comics"), ("IUT", "Update Tags"), ("IUV", "Update Custom Covers"), ("JAF", "Adopt Orphan Folders"), ("JCT", "Cleanup Orphan Tags"), ("JCU", "Update Codex Server Software"), ("JDB", "Backup Database"), ("JDO", "Optimize Database"), ("JID", "Check Integrity Of Entire Database"), ("JIF", "Check Integrtity Of Database Foreign Keys"), ("JIS", "Check Integrity Of Full Text Virtual Table"), ("JLV", "Check Codex Latest Version"), ("JRB", "Cleanup Orphan Bookmarks"), ("JRS", "Cleanup Old Sessions"), ("JRV", "Cleanup Orphan Covers"), ("JSR", "Rebuild Full Text Search Virtual Table"), ("RCR", "Restart Codex Server"), ("RCS", "Stop Codex Server"), ("SIO", "Optimize Search Virtual Table"), ("SIR", "Clean Orphan Search Entries"), ("SIX", "Clear Full Text Search Table"), ("SSC", "Sync New Search Entries"), ("SSU", "Sync Old Search Entries"), ("WPO", "Poll Library"), ], db_index=True, max_length=3, ), ), ] ================================================ FILE: codex/migrations/0035_fts_optmize.py ================================================ """Generated by Django 5.2.4 on 2025-07-21 15:05.""" from itertools import chain import django.db.models.deletion from comicbox.enums.maps.identifiers import ID_SOURCE_NAME_MAP, get_id_source_by_alias from django.db import migrations, models def _map_identifiers_to_canonical_names(apps) -> dict: id_source_model = apps.get_model("codex", "identifiersource") identifier_source_map = {} for source in id_source_model.objects.all(): if id_source_enum := get_id_source_by_alias(source.name, default=None): canon_name = ID_SOURCE_NAME_MAP.get(id_source_enum, source.name) if canon_name != source.name: if canon_name not in identifier_source_map: identifier_source_map[canon_name] = set() identifier_source_map[canon_name] |= set( source.identifier_set.values_list("pk", flat=True) ) return identifier_source_map def _prepare_canonical_id_sources(apps, 


def _prepare_canonical_id_sources(apps, identifier_source_map) -> tuple:
    id_source_model = apps.get_model("codex", "identifiersource")
    source_names = identifier_source_map.keys()
    existing_id_sources = id_source_model.objects.filter(name__in=source_names).only(
        "name"
    )
    existing_id_source_names = id_source_model.objects.filter(
        name__in=source_names
    ).values_list("name", flat=True)
    create_id_source_names = sorted(
        frozenset(set(identifier_source_map.keys()) - set(existing_id_source_names))
    )
    return tuple(
        id_source_model(name=name) for name in create_id_source_names
    ), existing_id_sources


def _create_link_map(identifier_source_map, sources) -> dict:
    link_map = {}
    for id_source in chain(*sources):
        for id_pk in identifier_source_map[id_source.name]:
            link_map[id_pk] = id_source
    return link_map


def _prepare_updatable_identifiers(apps, identifier_source_map, sources) -> tuple:
    link_map = _create_link_map(identifier_source_map, sources)
    identifier_model = apps.get_model("codex", "identifier")
    updateable_identifiers = identifier_model.objects.filter(pk__in=link_map.keys())
    obj_list = []
    for identifier in updateable_identifiers:
        identifier.source = link_map[identifier.pk]
        obj_list.append(identifier)
    return tuple(obj_list)


def _create_canonical_sources(apps) -> tuple[dict, tuple]:
    print("Examining identifier sources for conversion to canonical sources...")
    identifier_source_map = _map_identifiers_to_canonical_names(apps)
    print("Preparing missing canonical identifier sources for creation...")
    obj_list, existing_id_sources = _prepare_canonical_id_sources(
        apps, identifier_source_map
    )
    id_source_model = apps.get_model("codex", "identifiersource")
    created_id_sources = id_source_model.objects.bulk_create(obj_list)
    count = len(obj_list)
    print(f"Created {count} canonical identifier sources")
    return identifier_source_map, (existing_id_sources, created_id_sources)


def _update_identifiers_with_canonical_sources(
    apps, identifier_source_map, sources
) -> None:
    print("Preparing identifiers for update with canonical sources...")
    obj_list = _prepare_updatable_identifiers(apps, identifier_source_map, sources)
    identifier_model = apps.get_model("codex", "identifier")
    identifier_model.objects.bulk_update(obj_list, fields=("source",))
    count = len(obj_list)
    print(f"Updated {count} identifiers with canonical sources.")


def _convert_identifier_sources(apps, _schema_editor) -> None:
    identifier_source_map, sources = _create_canonical_sources(apps)
    _update_identifiers_with_canonical_sources(apps, identifier_source_map, sources)


class Migration(migrations.Migration):
    """Migrate db."""

    dependencies = [
        ("codex", "0034_comicbox2"),
    ]

    operations = [
        migrations.RunPython(_convert_identifier_sources),
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL(
                    sql="DROP TABLE IF EXISTS codex_comicfts;",
                ),
                migrations.RunSQL(
                    sql="CREATE VIRTUAL TABLE codex_comicfts USING fts5(comic_id UNINDEXED, created_at UNINDEXED, updated_at UNINDEXED, publisher, imprint, series, name, collection_title, age_rating, country, language, original_format, review, scan_info, summary, tagger, characters, credits, genres, locations, roles, series_groups, stories, story_arcs, tags, teams, universes, sources)",
                    reverse_sql="DROP TABLE IF EXISTS codex_comicfts",
                ),
            ],
            state_operations=[
                migrations.DeleteModel(name="ComicFTS"),
                migrations.CreateModel(
                    name="ComicFTS",
                    fields=[
                        ("created_at", models.DateTimeField(auto_now_add=True)),
                        ("updated_at", models.DateTimeField(auto_now=True)),
                        (
                            "comic",
                            models.OneToOneField(
                                on_delete=django.db.models.deletion.CASCADE,
                                primary_key=True,
                                serialize=False,
                                to="codex.comic",
                            ),
                        ),
                        ("body", models.TextField()),
                    ],
                    options={"get_latest_by": "updated_at", "managed": False},
                ),
            ],
        ),
    ]

================================================
FILE: codex/migrations/0036_alter_comic_path_alter_customcover_path_and_more.py
================================================
"""Generated by Django 5.2.8 on 2025-11-24 02:28."""

from django.db import migrations, models

import codex.models.library


class Migration(migrations.Migration):
    """Migrate db."""

    dependencies = [
        ("codex", "0035_fts_optmize"),
    ]

    operations = [
        migrations.AlterField(
            model_name="comic",
            name="path",
            field=models.CharField(db_index=True, max_length=4095),
        ),
        migrations.AlterField(
            model_name="customcover",
            name="path",
            field=models.CharField(db_index=True, max_length=4095),
        ),
        migrations.AlterField(
            model_name="failedimport",
            name="path",
            field=models.CharField(db_index=True, max_length=4095),
        ),
        migrations.AlterField(
            model_name="folder",
            name="path",
            field=models.CharField(db_index=True, max_length=4095),
        ),
        migrations.AlterField(
            model_name="library",
            name="path",
            field=models.CharField(
                db_index=True,
                max_length=4095,
                unique=True,
                validators=[codex.models.library.validate_dir_exists],
            ),
        ),
    ]

================================================
FILE: codex/migrations/0037_redefine_reading_direction_filetype_choices.py
================================================
"""Generated by Django 5.2.10 on 2026-01-29 22:05."""

from django.db import migrations, models

import codex.models.fields


class Migration(migrations.Migration):
    """Migrate db."""

    dependencies = [
        ("codex", "0036_alter_comic_path_alter_customcover_path_and_more"),
    ]

    operations = [
        migrations.AlterField(
            model_name="bookmark",
            name="reading_direction",
            field=models.CharField(
                blank=True,
                choices=[("rtl", "Rtl"), ("ltr", "Ltr"), ("ttb", "Ttb"), ("btt", "Btt")],
                default="",
                max_length=3,
            ),
        ),
        migrations.AlterField(
            model_name="comic",
            name="file_type",
            field=codex.models.fields.CleaningCharField(
                blank=True,
                choices=[("CBZ", "Cbz"), ("CBR", "Cbr"), ("CB7", "Cb7"), ("CBT", "Cbt"), ("PDF", "Pdf")],
                db_collation="nocase",
                db_index=True,
                default="",
                max_length=3,
            ),
        ),
        migrations.AlterField(
            model_name="comic",
            name="reading_direction",
            field=codex.models.fields.CleaningCharField(
                choices=[("rtl", "Rtl"), ("ltr", "Ltr"), ("ttb", "Ttb"), ("btt", "Btt")],
                db_collation="nocase",
                db_index=True,
                default="ltr",
                max_length=3,
            ),
        ),
    ]

================================================
FILE: codex/migrations/0038_settings_tables.py
================================================
"""Generated by Django 6.0.3 on 2026-03-30 06:33."""

from importlib import import_module

import django.db.migrations.operations.special
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models

import codex.models.settings
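
# A sketch of the data-migration flow below (summarized from this file, not
# authoritative documentation): legacy browser/reader settings lived as dicts
# inside each Django session row. For every session, each settings dict is
# copied into the new SettingsBrowser/SettingsReader tables (plus the
# Show/Filters/LastRoute child rows), then the migrated keys are removed from
# the session payload and the session is re-encoded and saved.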

# Session keys to migrate and their target model / client value.
_MIGRATIONS = (
    ("browser", "settingsbrowser", "api"),
    ("reader", "settingsreader", "api"),
    ("opds_browser", "settingsbrowser", "opds"),
    ("opds_reader", "settingsreader", "opds"),
)
_BROWSER_DIRECT_KEYS = frozenset(
    {
        "top_group", "order_by", "order_reverse", "custom_covers",
        "dynamic_covers", "twenty_four_hour_time", "always_show_filename",
    }
)
_READER_DIRECT_KEYS = frozenset(
    {
        "fit_to", "two_pages", "reading_direction", "read_rtl_in_reverse",
        "finish_on_last_page", "page_transition", "cache_book",
    }
)
_FILTER_KEYS = frozenset(
    {
        "bookmark", "age_rating", "characters", "country", "credits",
        "critical_rating", "decade", "file_type", "genres",
        "identifier_source", "language", "locations", "monochrome",
        "original_format", "reading_direction", "series_groups", "stories",
        "story_arcs", "tagger", "tags", "teams", "universes", "year",
    }
)
_SHOW_KEYS = frozenset({"p", "i", "s", "v"})


def create_default_show_row(apps, _schema_editor):
    """Create the default SettingsBrowserShow row."""
    show_model = apps.get_model("codex", "SettingsBrowserShow")
    show_model.objects.create(pk=1)


def _get_or_create_show(show_model, show_dict):
    """Get or create a SettingsBrowserShow row from a show dict."""
    show_kwargs = {k: bool(show_dict.get(k, k in ("p", "s"))) for k in _SHOW_KEYS}
    show, _ = show_model.objects.get_or_create(**show_kwargs)
    return show


def _create_browser_filters(filters_model, browser, settings_dict):
    """Create a SettingsBrowserFilters row from the session settings dict."""
    filters_dict = settings_dict.get("filters", {})
    kwargs = {"browser": browser}
    for key in _FILTER_KEYS:
        if key in filters_dict:
            value = filters_dict[key]
            kwargs[key] = value if key == "bookmark" else (list(value) if value else [])
    # Legacy "bookmark_filter" key at the top level.
    if "bookmark_filter" in settings_dict and "bookmark" not in kwargs:
        kwargs["bookmark"] = settings_dict["bookmark_filter"]
    filters_model.objects.create(**kwargs)


def _create_browser_last_route(route_model, browser, settings_dict):
    """Create a SettingsBrowserLastRoute row from the session settings dict."""
    route_dict = settings_dict.get("last_route", {})
    kwargs = {"browser": browser}
    if "group" in route_dict:
        kwargs["group"] = route_dict["group"]
    if "pks" in route_dict:
        pks = route_dict["pks"]
        kwargs["pks"] = list(pks) if pks else [0]
    if "page" in route_dict:
        kwargs["page"] = route_dict["page"]
    route_model.objects.create(**kwargs)
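

# Shape of a legacy session settings dict as consumed by the helpers above
# (an illustrative reconstruction from the reads in this file; the values
# are hypothetical):
#   {
#       "top_group": "s",
#       "order_by": "sort_name",
#       "q": "batman",
#       "show": {"p": True, "i": False, "s": True, "v": False},
#       "filters": {"bookmark": "UNREAD", "decade": [1990]},
#       "last_route": {"group": "r", "pks": [0], "page": 1},
#   }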


def _migrate_browser_session(settings_dict, model_map, client, user, session_key):
    """Migrate one browser session dict into the new related-model tables."""
    browser_model = model_map["settingsbrowser"]
    scope_filter = {"client": client, "name": ""}
    existing = None
    if user:
        existing = browser_model.objects.filter(user=user, **scope_filter).first()
    if existing is None:
        existing = browser_model.objects.filter(
            session_id=session_key,
            **scope_filter,
        ).first()
    if existing is not None:
        return True
    # Show — get or create the shared row.
    show_dict = settings_dict.get("show", {})
    show = _get_or_create_show(model_map["settingsbrowsershow"], show_dict)
    # Browser — direct fields only.
    create_kwargs = {
        "user": user,
        "session_id": session_key,
        "show": show,
        **scope_filter,
    }
    for key in _BROWSER_DIRECT_KEYS:
        if key in settings_dict:
            create_kwargs[key] = settings_dict[key]
    for key in ("q", "search"):
        if key in settings_dict:
            create_kwargs["search"] = settings_dict[key]
            break
    browser = browser_model.objects.create(**create_kwargs)
    # Filters and last route — one-to-one children.
    _create_browser_filters(model_map["settingsbrowserfilters"], browser, settings_dict)
    _create_browser_last_route(
        model_map["settingsbrowserlastroute"],
        browser,
        settings_dict,
    )
    return True


def _migrate_reader_session(settings_dict, model_map, client, user, session_key):
    """Migrate one reader session dict into the SettingsReader table."""
    model = model_map["settingsreader"]
    scope_filter = {"client": client, "comic": None, "series": None, "folder": None}
    existing = None
    if user:
        existing = model.objects.filter(user=user, **scope_filter).first()
    if existing is None:
        existing = model.objects.filter(
            session_id=session_key,
            **scope_filter,
        ).first()
    if existing is not None:
        return True
    create_kwargs = {"user": user, "session_id": session_key, **scope_filter}
    for key in _READER_DIRECT_KEYS:
        if key in settings_dict:
            create_kwargs[key] = settings_dict[key]
    model.objects.create(**create_kwargs)
    return True


_MIGRATE_FN_MAP = {
    "settingsbrowser": _migrate_browser_session,
    "settingsreader": _migrate_reader_session,
}


def _get_user(user_model, session_data):
    """Resolve the authenticated user from session data, or None."""
    user_id = session_data.get("_auth_user_id")
    if user_id is None:
        return None
    try:
        return user_model.objects.get(pk=user_id)
    except user_model.DoesNotExist:
        return None


def migrate_session_forward_model(
    session_data, data_key, model_map, model_name, client, user, session_key
) -> bool:
    """Migrate one settings key of a Django session into its new table."""
    settings_dict = session_data.get(data_key)
    if not settings_dict:
        return False
    migrate_fn = _MIGRATE_FN_MAP[model_name]
    return migrate_fn(settings_dict, model_map, client, user, session_key)


def migrate_session_forward(
    db_session, user_model, model_map, encoder, migrated, cleaned
) -> tuple[int, int]:
    """Read settings from one Django session and populate the new tables."""
    session_data = db_session.get_decoded()
    if not session_data:
        return migrated, cleaned
    user = _get_user(user_model, session_data)
    session_key = db_session.session_key
    any_migrated = False
    for data_key, model_name, client in _MIGRATIONS:
        if migrate_session_forward_model(
            session_data, data_key, model_map, model_name, client, user, session_key
        ):
            any_migrated = True
            migrated += 1
    if any_migrated:
        changed = False
        for data_key, _model_name, _client in _MIGRATIONS:
            if data_key in session_data:
                del session_data[data_key]
                changed = True
        if changed:
            db_session.session_data = encoder.encode(session_data)
            db_session.save(update_fields=["session_data"])
            cleaned += 1
    return migrated, cleaned


def migrate_sessions_forward(apps, _schema_editor):
    """Read settings from every Django session and populate the new tables."""
    from django.contrib.sessions.models import Session

    user_model = apps.get_model(*settings.AUTH_USER_MODEL.split("."))
    model_map = {
        "settingsbrowser": apps.get_model("codex", "SettingsBrowser"),
        "settingsreader": apps.get_model("codex", "SettingsReader"),
        "settingsbrowsershow": apps.get_model("codex", "SettingsBrowserShow"),
        "settingsbrowserfilters": apps.get_model("codex", "SettingsBrowserFilters"),
        "settingsbrowserlastroute": apps.get_model("codex", "SettingsBrowserLastRoute"),
    }
    session_store = import_module(settings.SESSION_ENGINE).SessionStore
    encoder = session_store()
    sessions = Session.objects.order_by("-expire_date")
    migrated = 0
    cleaned = 0
    for db_session in sessions.iterator():
        migrated, cleaned = migrate_session_forward(
            db_session, user_model, model_map, encoder, migrated, cleaned
        )
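

# Sessions are visited newest-first (ordered by -expire_date), and the
# per-user existence checks above skip rows that already exist, so when a
# user has several sessions the most recently expiring one appears to win.
# (An inference from the code above, not documented behavior.)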
[ ("codex", "0037_redefine_reading_direction_filetype_choices"), ("sessions", "0001_initial"), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CFO", "Find Orphan Covers"), ("CRC", "Remove Covers"), ("IAT", "Aggregate Tags From Comics"), ("ICC", "Create Comics"), ("ICT", "Create Tags"), ("ICV", "Create Custom Covers"), ("IFC", "Mark Failed Failed Imports"), ("IFD", "Clean Up Failed Imports"), ("IFQ", "Query Failed Imports"), ("IFU", "Update Failed Imports"), ("IGU", "Update Timestamps For Browser Groups"), ("ILT", "Link Tags"), ("ILV", "Link Custom Covers"), ("IQC", "Query Comics"), ("IQL", "Query Tag Links"), ("IQT", "Query Missing Tags"), ("IQV", "Query Missing Custom Covers"), ("IRC", "Remove Comics"), ("IRF", "Remove Folders"), ("IRT", "Read Tags From Comics"), ("IRV", "Remove Custom Covers"), ("ISC", "Create Search Index Entries"), ("ISU", "Update Search Index Entries"), ("IUC", "Update Comics"), ("IUT", "Update Tags"), ("IUV", "Update Custom Covers"), ("JAF", "Adopt Orphan Folders"), ("JAS", "Cleanup Orphan Settings"), ("JCT", "Cleanup Orphan Tags"), ("JCU", "Update Codex Server Software"), ("JDB", "Backup Database"), ("JDO", "Optimize Database"), ("JID", "Check Integrity Of Entire Database"), ("JIF", "Check Integrtity Of Database Foreign Keys"), ("JIS", "Check Integrity Of Full Text Virtual Table"), ("JLV", "Check Codex Latest Version"), ("JRB", "Cleanup Orphan Bookmarks"), ("JRS", "Cleanup Old Sessions"), ("JRV", "Cleanup Orphan Covers"), ("JSR", "Rebuild Full Text Search Virtual Table"), ("RCR", "Restart Codex Server"), ("RCS", "Stop Codex Server"), ("SIO", "Optimize Search Virtual Table"), ("SIR", "Clean Orphan Search Entries"), ("SIX", "Clear Full Text Search Table"), ("SSC", "Sync New Search Entries"), ("SSU", "Sync Old Search Entries"), ("WPO", "Poll Library"), ], db_index=True, max_length=3, ), ), migrations.CreateModel( name="SettingsReader", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "client", models.CharField( choices=[("api", "API"), ("opds", "OPDS")], db_index=True, default="api", max_length=4, ), ), ( "fit_to", models.CharField( blank=True, choices=[ ("S", "Screen"), ("W", "Width"), ("H", "Height"), ("O", "Orig"), ], default="", max_length=1, ), ), ("two_pages", models.BooleanField(default=None, null=True)), ( "reading_direction", models.CharField( blank=True, choices=[ ("rtl", "Rtl"), ("ltr", "Ltr"), ("ttb", "Ttb"), ("btt", "Btt"), ], default="", max_length=3, ), ), ("read_rtl_in_reverse", models.BooleanField(default=None, null=True)), ("finish_on_last_page", models.BooleanField(default=None, null=True)), ("page_transition", models.BooleanField(default=None, null=True)), ("cache_book", models.BooleanField(default=None, null=True)), ( "comic", models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.comic", ), ), ( "folder", models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="codex.folder", ), ), ( "series", models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.series", ), ), ( "session", models.ForeignKey( blank=True, null=True, on_delete=codex.models.settings.cascade_if_user_null, to="sessions.session", ), ), 
( "user", models.ForeignKey( blank=True, null=True, on_delete=codex.models.settings.cascade_if_session_null, to=settings.AUTH_USER_MODEL, ), ), ], options={ "verbose_name_plural": "reader settings", "get_latest_by": "updated_at", "abstract": False, "constraints": [ models.CheckConstraint( condition=models.Q( models.Q( ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ), models.Q( ("comic__isnull", False), ("folder__isnull", True), ("series__isnull", True), ), models.Q( ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", False), ), models.Q( ("comic__isnull", True), ("folder__isnull", False), ("series__isnull", True), ), _connector="OR", ), name="settingsreader_scope_xor", ), models.UniqueConstraint( condition=models.Q( ("user__isnull", False), ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ), fields=("user", "client"), name="unique_settingsreader_user_global", ), models.UniqueConstraint( condition=models.Q( ("session__isnull", False), ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ), fields=("session", "client"), name="unique_settingsreader_session_global", ), models.UniqueConstraint( condition=models.Q( ("comic__isnull", False), ("user__isnull", False) ), fields=("user", "client", "comic"), name="unique_settingsreader_user_comic", ), models.UniqueConstraint( condition=models.Q( ("comic__isnull", False), ("session__isnull", False) ), fields=("session", "client", "comic"), name="unique_settingsreader_session_comic", ), models.UniqueConstraint( condition=models.Q( ("series__isnull", False), ("user__isnull", False) ), fields=("user", "client", "series"), name="unique_settingsreader_user_series", ), models.UniqueConstraint( condition=models.Q( ("series__isnull", False), ("session__isnull", False) ), fields=("session", "client", "series"), name="unique_settingsreader_session_series", ), models.UniqueConstraint( condition=models.Q( ("folder__isnull", False), ("user__isnull", False) ), fields=("user", "client", "folder"), name="unique_settingsreader_user_folder", ), models.UniqueConstraint( condition=models.Q( ("folder__isnull", False), ("session__isnull", False) ), fields=("session", "client", "folder"), name="unique_settingsreader_session_folder", ), ], }, ), migrations.CreateModel( name="SettingsBrowser", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "client", models.CharField( choices=[("api", "API"), ("opds", "OPDS")], db_index=True, default="api", max_length=4, ), ), ( "name", models.CharField( blank=True, db_index=True, default="", max_length=128 ), ), ( "top_group", models.CharField( choices=[ ("p", "Publishers"), ("i", "Imprints"), ("s", "Series"), ("v", "Volumes"), ("c", "Issues"), ("f", "Folders"), ("a", "Story Arcs"), ], default="p", max_length=1, ), ), ( "order_by", models.CharField( choices=[ ("created_at", "Added Time"), ("age_rating", "Age Rating"), ("child_count", "Child Count"), ("critical_rating", "Critical Rating"), ("filename", "Filename"), ("size", "File Size"), ("bookmark_updated_at", "Last Read"), ("sort_name", "Name"), ("page_count", "Page Count"), ("date", "Publish Date"), ("search_score", "Search Score"), ("story_arc_number", "Story Arc Number"), ("updated_at", "Updated Time"), ], default="", max_length=32, ), ), ("order_reverse", models.BooleanField(default=False)), ("search", 
models.CharField(blank=True, default="", max_length=4095)), ("custom_covers", models.BooleanField(default=True)), ("dynamic_covers", models.BooleanField(default=True)), ("twenty_four_hour_time", models.BooleanField(default=False)), ("always_show_filename", models.BooleanField(default=False)), ( "session", models.ForeignKey( blank=True, null=True, on_delete=codex.models.settings.cascade_if_user_null, to="sessions.session", ), ), ( "user", models.ForeignKey( blank=True, null=True, on_delete=codex.models.settings.cascade_if_session_null, to=settings.AUTH_USER_MODEL, ), ), ], options={ "verbose_name_plural": "browser settings", "get_latest_by": "updated_at", "abstract": False, "constraints": [ models.UniqueConstraint( condition=models.Q(("user__isnull", False)), fields=("user", "client", "name"), name="unique_settingsbrowser_user", ), models.UniqueConstraint( condition=models.Q(("session__isnull", False)), fields=("session", "client", "name"), name="unique_settingsbrowser_session", ), ], }, ), migrations.CreateModel( name="SettingsBrowserFilters", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "bookmark", models.CharField( blank=True, choices=[ ("", "All"), ("IN_PROGRESS", "In Progress"), ("READ", "Read"), ("UNREAD", "Unread"), ], default="", max_length=16, ), ), ("age_rating", models.JSONField(default=list)), ("characters", models.JSONField(default=list)), ("country", models.JSONField(default=list)), ("credits", models.JSONField(default=list)), ("critical_rating", models.JSONField(default=list)), ("decade", models.JSONField(default=list)), ("file_type", models.JSONField(default=list)), ("genres", models.JSONField(default=list)), ("identifier_source", models.JSONField(default=list)), ("language", models.JSONField(default=list)), ("locations", models.JSONField(default=list)), ("monochrome", models.JSONField(default=list)), ("original_format", models.JSONField(default=list)), ("reading_direction", models.JSONField(default=list)), ("series_groups", models.JSONField(default=list)), ("stories", models.JSONField(default=list)), ("story_arcs", models.JSONField(default=list)), ("tagger", models.JSONField(default=list)), ("tags", models.JSONField(default=list)), ("teams", models.JSONField(default=list)), ("universes", models.JSONField(default=list)), ("year", models.JSONField(default=list)), ( "browser", models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, related_name="filters", to="codex.settingsbrowser", ), ), ], options={ "verbose_name_plural": "browser filter settings", "get_latest_by": "updated_at", "abstract": False, }, ), migrations.CreateModel( name="SettingsBrowserLastRoute", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ( "group", models.CharField( choices=[ ("p", "Publishers"), ("i", "Imprints"), ("s", "Series"), ("v", "Volumes"), ("c", "Issues"), ("f", "Folders"), ("a", "Story Arcs"), ("r", "Root"), ], default="r", max_length=1, ), ), ("pks", models.JSONField(default=list)), ("page", models.PositiveSmallIntegerField(default=1)), ( "browser", models.OneToOneField( on_delete=django.db.models.deletion.CASCADE, related_name="last_route", to="codex.settingsbrowser", ), ), ], options={ "verbose_name_plural": "browser last-route 
settings", "get_latest_by": "updated_at", "abstract": False, }, ), migrations.CreateModel( name="SettingsBrowserShow", fields=[ ( "id", models.AutoField( auto_created=True, primary_key=True, serialize=False, verbose_name="ID", ), ), ("created_at", models.DateTimeField(auto_now_add=True)), ("updated_at", models.DateTimeField(auto_now=True)), ("p", models.BooleanField(default=True)), ("i", models.BooleanField(default=False)), ("s", models.BooleanField(default=True)), ("v", models.BooleanField(default=False)), ], options={ "verbose_name_plural": "browser show settings", "get_latest_by": "updated_at", "abstract": False, "constraints": [ models.UniqueConstraint( fields=("p", "i", "s", "v"), name="unique_settingsbrowsershow_flags", ) ], }, ), migrations.RunPython( code=create_default_show_row, reverse_code=django.db.migrations.operations.special.RunPython.noop, ), migrations.AddField( model_name="settingsbrowser", name="show", field=models.ForeignKey( # Created by RunPython above. default=1, # ty: ignore[invalid-argument-type] on_delete=django.db.models.deletion.PROTECT, related_name="+", to="codex.settingsbrowsershow", ), preserve_default=False, ), migrations.RunPython( code=migrate_sessions_forward, reverse_code=django.db.migrations.operations.special.RunPython.noop, ), migrations.RemoveField( model_name="bookmark", name="fit_to", ), migrations.RemoveField( model_name="bookmark", name="reading_direction", ), migrations.RemoveField( model_name="bookmark", name="two_pages", ), migrations.AlterField( model_name="librarianstatus", name="status_type", field=models.CharField( choices=[ ("CCC", "Create Covers"), ("CFO", "Find Orphan Covers"), ("CRC", "Remove Covers"), ("IAT", "Aggregate Tags From Comics"), ("ICC", "Create Comics"), ("ICT", "Create Tags"), ("ICV", "Create Custom Covers"), ("IFC", "Mark Failed Failed Imports"), ("IFD", "Clean Up Failed Imports"), ("IFQ", "Query Failed Imports"), ("IFU", "Update Failed Imports"), ("IGU", "Update Timestamps For Browser Groups"), ("ILT", "Link Tags"), ("ILV", "Link Custom Covers"), ("IQC", "Query Comics"), ("IQL", "Query Tag Links"), ("IQT", "Query Missing Tags"), ("IQV", "Query Missing Custom Covers"), ("IRC", "Remove Comics"), ("IRF", "Remove Folders"), ("IRT", "Read Tags From Comics"), ("IRV", "Remove Custom Covers"), ("ISC", "Create Search Index Entries"), ("ISU", "Update Search Index Entries"), ("IUC", "Update Comics"), ("IUT", "Update Tags"), ("IUV", "Update Custom Covers"), ("JAF", "Adopt Orphan Folders"), ("JAS", "Cleanup Orphan Settings"), ("JCT", "Cleanup Orphan Tags"), ("JCU", "Update Codex Server Software"), ("JDB", "Backup Database"), ("JDO", "Optimize Database"), ("JID", "Check Integrity Of Entire Database"), ("JIF", "Check Integrtity Of Database Foreign Keys"), ("JIS", "Check Integrity Of Full Text Virtual Table"), ("JLV", "Check Codex Latest Version"), ("JRB", "Cleanup Orphan Bookmarks"), ("JRS", "Cleanup Old Sessions"), ("JRV", "Cleanup Orphan Covers"), ("JSR", "Rebuild Full Text Search Virtual Table"), ("RCR", "Restart Codex Server"), ("RCS", "Stop Codex Server"), ("SIO", "Optimize Search Virtual Table"), ("SIR", "Clean Orphan Search Entries"), ("SIX", "Clear Full Text Search Table"), ("SSC", "Sync New Search Entries"), ("SSU", "Sync Old Search Entries"), ("WPO", "Poll Library"), ("WRS", "Restart File Watcher"), ], db_index=True, max_length=3, ), ), migrations.RemoveConstraint( model_name="settingsreader", name="settingsreader_scope_xor", ), migrations.RemoveConstraint( model_name="settingsreader", 
name="unique_settingsreader_user_global", ), migrations.RemoveConstraint( model_name="settingsreader", name="unique_settingsreader_session_global", ), migrations.AddField( model_name="settingsreader", name="story_arc", field=models.ForeignKey( blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to="codex.storyarc", ), ), migrations.AddConstraint( model_name="settingsreader", constraint=models.CheckConstraint( condition=models.Q( models.Q( ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ("story_arc__isnull", True), ), models.Q( ("comic__isnull", False), ("folder__isnull", True), ("series__isnull", True), ("story_arc__isnull", True), ), models.Q( ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", False), ("story_arc__isnull", True), ), models.Q( ("comic__isnull", True), ("folder__isnull", False), ("series__isnull", True), ("story_arc__isnull", True), ), models.Q( ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ("story_arc__isnull", False), ), _connector="OR", ), name="settingsreader_scope_xor", ), ), migrations.AddConstraint( model_name="settingsreader", constraint=models.UniqueConstraint( condition=models.Q( ("user__isnull", False), ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ("story_arc__isnull", True), ), fields=("user", "client"), name="unique_settingsreader_user_global", ), ), migrations.AddConstraint( model_name="settingsreader", constraint=models.UniqueConstraint( condition=models.Q( ("session__isnull", False), ("comic__isnull", True), ("folder__isnull", True), ("series__isnull", True), ("story_arc__isnull", True), ), fields=("session", "client"), name="unique_settingsreader_session_global", ), ), migrations.AddConstraint( model_name="settingsreader", constraint=models.UniqueConstraint( condition=models.Q( ("story_arc__isnull", False), ("user__isnull", False) ), fields=("user", "client", "story_arc"), name="unique_settingsreader_user_story_arc", ), ), migrations.AddConstraint( model_name="settingsreader", constraint=models.UniqueConstraint( condition=models.Q( ("session__isnull", False), ("story_arc__isnull", False) ), fields=("session", "client", "story_arc"), name="unique_settingsreader_session_story_arc", ), ), ] ================================================ FILE: codex/migrations/__init__.py ================================================ """Database migrations.""" ================================================ FILE: codex/models/__init__.py ================================================ """Codex Django models.""" from codex.models.admin import * from codex.models.bookmark import * from codex.models.comic import * from codex.models.groups import * from codex.models.identifier import * from codex.models.library import * from codex.models.named import * from codex.models.paths import * from codex.models.settings import * ================================================ FILE: codex/models/admin.py ================================================ """Admin models.""" import base64 import uuid from typing import override from django.conf import settings from django.contrib.auth.models import Group from django.db.models import ( CASCADE, BooleanField, CharField, DateTimeField, OneToOneField, PositiveSmallIntegerField, TextChoices, ) from django.utils.translation import gettext_lazy as _ from codex.choices.admin import AdminFlagChoices from codex.choices.statii import ADMIN_STATUS_TITLES from codex.models.base import MAX_FIELD_LEN, MAX_NAME_LEN, BaseModel 
from codex.models.choices import max_choices_len, text_choices_from_map

__all__ = ("AdminFlag", "LibrarianStatus", "Timestamp", "UserActive")


class AdminFlag(BaseModel):
    """Flags set by administrators."""

    FALSE_DEFAULTS = frozenset({AdminFlagChoices.AUTO_UPDATE})

    key = CharField(
        db_index=True,
        max_length=max_choices_len(AdminFlagChoices),
        choices=AdminFlagChoices.choices,
    )
    on = BooleanField(default=True)
    value = CharField(max_length=MAX_NAME_LEN, default="", blank=True)

    class Meta(BaseModel.Meta):
        """Constraints."""

        unique_together = ("key",)


class LibrarianStatus(BaseModel):
    """Active Library Tasks."""

    StatusChoices = text_choices_from_map(
        ADMIN_STATUS_TITLES.inverse, "LibrarianStatusChoices"
    )

    status_type = CharField(
        db_index=True,
        max_length=max_choices_len(StatusChoices),
        choices=StatusChoices.choices,
    )
    subtitle = CharField(db_index=True, max_length=MAX_NAME_LEN)
    complete = PositiveSmallIntegerField(null=True, default=None)
    total = PositiveSmallIntegerField(null=True, default=None)
    preactive = DateTimeField(null=True, default=None)
    active = DateTimeField(null=True, default=None)

    class Meta(BaseModel.Meta):
        """Constraints."""

        unique_together = ("status_type", "subtitle")
        verbose_name_plural = "LibrarianStatuses"


class Timestamp(BaseModel):
    """Timestamped Named Strings."""

    class Choices(TextChoices):
        """Choices for Timestamps."""

        API_KEY = "AP", _("API Key")
        CODEX_VERSION = "VR", _("Codex Version")
        JANITOR = "JA", _("Janitor")
        TELEMETER_SENT = "TS", _("Telemeter Sent")

    key = CharField(
        db_index=True,
        max_length=max_choices_len(Choices),
        choices=Choices.choices,
    )
    version = CharField(max_length=MAX_FIELD_LEN, default="")

    @classmethod
    def touch(cls, choice) -> None:
        """Touch a timestamp."""
        cls.objects.get(key=choice.value).save()

    def save_uuid_version(self) -> None:
        """Create base64 uuid."""
        uuid_bytes = uuid.uuid4().bytes
        b64_bytes = base64.urlsafe_b64encode(uuid_bytes)
        self.version = b64_bytes.decode("utf-8").replace("=", "")
        self.save()

    class Meta(BaseModel.Meta):
        """Constraints."""

        unique_together = ("key",)

    @override
    def __repr__(self) -> str:
        """Print name for choice."""
        return self.Choices(self.key).name


class UserActive(BaseModel):
    """User last active record."""

    user = OneToOneField(settings.AUTH_USER_MODEL, db_index=True, on_delete=CASCADE)


class GroupAuth(BaseModel):
    """Extended Attributes for Groups."""

    group = OneToOneField(Group, db_index=True, on_delete=CASCADE)
    exclude = BooleanField(db_index=True, default=False)

================================================
FILE: codex/models/base.py
================================================
"""Base model."""

from typing import override

from django.db.models import DateTimeField, Model
from django.db.models.base import ModelBase

from codex.models.fields import CleaningCharField
from codex.models.query import GroupByManager

__all__ = ("BaseModel", "NamedModel")

MAX_PATH_LEN = 4095
MAX_NAME_LEN = 128
MAX_FIELD_LEN = 32
MAX_ISSUE_SUFFIX_LEN = 16


class BaseModel(Model):
    """A base model with universal fields."""

    created_at = DateTimeField(auto_now_add=True)
    updated_at = DateTimeField(auto_now=True)

    objects = GroupByManager()

    class Meta(ModelBase):
        """Without this a real table is created and joined to."""

        # Model.Meta is not inheritable.
        abstract = True
        get_latest_by = "updated_at"

    def presave(self):
        """Create values before save."""


class NamedModel(BaseModel):
    """A base model for simple named tables."""

    name = CleaningCharField(db_index=True, max_length=MAX_NAME_LEN)

    class Meta(BaseModel.Meta):
        """Defaults to uniquely named, must be overridden."""

        abstract = True
        unique_together: tuple[str, ...] = ("name",)

    @override
    def __repr__(self) -> str:
        """Return the name."""
        return str(self.name)

================================================
FILE: codex/models/bookmark.py
================================================
"""Bookmark model."""

from django.conf import settings
from django.contrib.sessions.models import Session
from django.db.models import (
    CASCADE,
    BooleanField,
    ForeignKey,
    PositiveSmallIntegerField,
)

from codex.models.base import BaseModel
from codex.models.comic import Comic

__all__ = ("Bookmark", "cascade_if_user_null")


def cascade_if_user_null(
    collector,
    field,
    sub_objs,
    using,  # noqa: ARG001
) -> None:
    """
    Cascade only if the user field is null.

    Do this to keep deleting ephemeral session data from Bookmark table.
    Adapted from:
    https://github.com/django/django/blob/master/django/db/models/deletion.py#L23
    """
    # only cascade the ones with null user fields.
    null_user_sub_objs = [sub_obj for sub_obj in sub_objs if sub_obj.user is None]
    if null_user_sub_objs:
        collector.collect(
            null_user_sub_objs,
            source=field.remote_field.model,
            source_attr=field.name,
            nullable=field.null,
        )
    # Set them all to null
    if field.null:  # and not connections[using].features.can_defer_constraint_checks:
        collector.add_field_update(field, None, sub_objs)


class Bookmark(BaseModel):
    """Persist user's bookmarks."""

    user = ForeignKey(
        settings.AUTH_USER_MODEL, db_index=True, on_delete=CASCADE, null=True
    )
    session = ForeignKey(
        Session, db_index=True, on_delete=cascade_if_user_null, null=True
    )
    comic = ForeignKey(Comic, db_index=True, on_delete=CASCADE)
    page = PositiveSmallIntegerField(db_index=True, null=True)
    finished = BooleanField(default=False, db_index=True)

    class Meta(BaseModel.Meta):
        """Constraints."""

        unique_together = ("user", "session", "comic")
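

# A minimal sketch of the deletion behavior implemented above (an assumed
# scenario, not a test from this repo): deleting a Session cascades only to
# anonymous rows.
#
#   session.delete()
#   # -> Bookmark(user=None, session=session) is deleted with the session.
#   # -> Bookmark(user=some_user, session=session) survives; its session
#   #    field is set to NULL instead.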

================================================
FILE: codex/models/choices.py
================================================
"""Choices for fields."""

from collections.abc import Mapping
from enum import Enum, _EnumDict
from typing import cast

from comicbox.enums.comicbox import FileTypeEnum, ReadingDirectionEnum
from django.db.models import Choices, TextChoices
from django.db.models.enums import ChoicesType


def _prepare_text_choices_class_dict(class_name: str) -> _EnumDict:
    """Class dict."""
    return ChoicesType.__prepare__(class_name, (TextChoices,))


def _create_text_choices_class(
    class_name: str, cls_dict: _EnumDict
) -> type[TextChoices]:
    """Create the metaclass and cast it into the proper type."""
    new_cls = ChoicesType(class_name, (TextChoices,), cls_dict)
    return cast("type[TextChoices]", new_cls)


def text_choices_from_enum(
    enum_cls: type[Enum], class_name: str = ""
) -> type[TextChoices]:
    """Create TextChoices from an enum."""
    if not class_name:
        class_name = enum_cls.__name__.removesuffix("Enum") + "Choices"
    cls_dict = _prepare_text_choices_class_dict(class_name)
    for member in enum_cls:
        cls_dict[member.name] = member.value
    return _create_text_choices_class(class_name, cls_dict)


def text_choices_from_map(choices_map: Mapping, class_name: str) -> type[TextChoices]:
    """Create TextChoices from a Mapping."""
    cls_dict = _prepare_text_choices_class_dict(class_name)
    for name, value in choices_map.items():
        cls_dict[name] = value
    return _create_text_choices_class(class_name, cls_dict)


def text_choices_from_string(string: str, class_name: str) -> type[TextChoices]:
    """Create TextChoices from a string of single-character values."""
    cls_dict = _prepare_text_choices_class_dict(class_name)
    for c in string:
        cls_dict[c.upper()] = c
    return _create_text_choices_class(class_name, cls_dict)


def max_choices_len(choices: type[Choices]) -> int:
    """Return the maximum possible size for a Choice's key."""
    if not choices.choices:
        return 0
    return max(len(choice[0]) for choice in choices.choices)  # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type]


FileTypeChoices = text_choices_from_enum(FileTypeEnum)
ReadingDirectionChoices = text_choices_from_enum(ReadingDirectionEnum)
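
# Illustrative result (inferred from the migrations in this repo, which
# record the generated choices): FileTypeChoices.choices ends up roughly as
#   [("CBZ", "Cbz"), ("CBR", "Cbr"), ("CB7", "Cb7"), ("CBT", "Cbt"), ("PDF", "Pdf")]
# and max_choices_len(FileTypeChoices) == 3.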
related_name="main_character_in_comics", ) main_team = ForeignKey( Character, db_index=True, null=True, on_delete=CASCADE, related_name="main_team_in_comics", ) # Alpha2 codes country = ForeignKey(Country, db_index=True, null=True, on_delete=CASCADE) language = ForeignKey(Language, db_index=True, null=True, on_delete=CASCADE) # Date year = CoercingPositiveSmallIntegerField(db_index=True, null=True) month = CoercingPositiveSmallIntegerField(db_index=True, null=True) day = CoercingPositiveSmallIntegerField(db_index=True, null=True) # Text summary = CleaningCharField(default="", db_collation="nocase") review = CleaningTextField(default="", db_collation="nocase") notes = CleaningTextField(default="", db_collation="nocase") # Ratings critical_rating = CoercingDecimalField( db_index=True, decimal_places=2, max_digits=5, default=None, null=True ) # Reader page_count = CoercingPositiveSmallIntegerField(db_index=True, default=0) reading_direction = CleaningCharField( db_index=True, choices=ReadingDirectionChoices.choices, default=ReadingDirectionEnum.LTR.value, max_length=max_choices_len(ReadingDirectionChoices), db_collation="nocase", ) # Misc monochrome = BooleanField(db_index=True, default=False) # ManyToMany characters = ManyToManyField(Character) credits = ManyToManyField(Credit) genres = ManyToManyField(Genre) identifiers = ManyToManyField(Identifier) locations = ManyToManyField(Location) series_groups = ManyToManyField(SeriesGroup) stories = ManyToManyField(Story) story_arc_numbers = ManyToManyField(StoryArcNumber) tags = ManyToManyField(Tag) teams = ManyToManyField(Team) universes = ManyToManyField(Universe) ##################### # Comicbox Ignored: # alternate_issue # alternate_volumes # cover_image # is_version_of # last_mark # manga # price # rights # codex only date = DateField(db_index=True, null=True) decade = CoercingPositiveSmallIntegerField(db_index=True, null=True) folders = ManyToManyField(Folder) size = PositiveIntegerField(db_index=True) file_type = CleaningCharField( db_index=True, choices=FileTypeChoices.choices, max_length=max_choices_len(FileTypeChoices), blank=True, default="", db_collation="nocase", ) metadata_mtime = DateTimeField(null=True) # Not useful custom_cover: ForeignKey | None = None class Meta(WatchedPathBrowserGroup.Meta): """Constraints.""" verbose_name = "Issue" def _set_date(self) -> None: """Compute a date for the comic.""" year = MINYEAR if self.year is None else min(max(self.year, MINYEAR), MAXYEAR) month = 1 if self.month is None else min(max(self.month, 1), 12) if self.day is None: day = 1 else: last_day_of_month = calendar.monthrange(year, month)[1] day = min(max(self.day, 1), last_day_of_month) self.date = date(year, month, day) def _set_decade(self) -> None: """Compute a decade for the comic.""" if self.year is None: self.decade = None else: self.decade = self.year - (self.year % 10) @override def presave(self) -> None: """Set computed values.""" super().presave() self._set_date() self._set_decade() self.size = Path(self.path).stat().st_size @property def max_page(self): """Calculate max page from page_count.""" return max(self.page_count - 1, 0) @staticmethod def _compute_zero_pad(issue_number_max) -> int: """Compute zero padding for issues.""" if issue_number_max is None: issue_number_max = 100 if issue_number_max < 1: return 1 return math.floor(math.log10(issue_number_max)) + 1 def get_filename(self) -> str: """Return filename from path as a property.""" return Path(self.path).name @classmethod def _get_title_issue_str(cls, obj, zero_pad) -> str: 
"""Get the issue parts of the title.""" issue_str = "" if obj.issue_number is not None: issue_number = obj.issue_number.normalize() if not zero_pad: zero_pad = 3 if issue_number % 1 == 0: precision = 0 else: precision = 1 zero_pad += 2 issue_str = f"#{issue_number:0{zero_pad}.{precision}f}" if issue_suffix := obj.issue_suffix: issue_str += issue_suffix return issue_str @classmethod def get_title( cls, obj, *, volume: bool, name: bool, filename_fallback: bool, zero_pad=None ) -> str: """Create the comic title for display.""" if not obj: return "" names = [] # Series if sn := obj.series.name: names.append(sn) # Volume if volume and (vn := obj.volume.name): vn_to = obj.volume.number_to vn = Volume.to_str(vn, vn_to) names.append(vn) # Issue if issue_str := cls._get_title_issue_str(obj, zero_pad): names.append(issue_str) # Title if name and obj.name: names.append(obj.name) title = " ".join(filter(None, names)).strip(" .") title = cls._RE_COMBINE_WHITESPACE.sub(" ", title).strip() if filename_fallback and not title: title = obj.get_filename() return title @override def __repr__(self) -> str: """Most common text representation for logging.""" return self.get_title(self, volume=True, name=True, filename_fallback=True) class ComicFTS(BaseModel): comic = OneToOneField(primary_key=True, to=Comic, on_delete=CASCADE) # Attributes collection_title = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) name = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) review = TextField(db_collation="nocase") summary = TextField(db_collation="nocase") # FK groups publisher = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) imprint = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) series = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) # FK age_rating = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) country = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) language = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) original_format = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) scan_info = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) tagger = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) # M2M characters = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) credits = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) genres = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) sources = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) locations = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) series_groups = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) stories = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) story_arcs = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) tags = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) teams = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) universes = CharField(db_collation="nocase", max_length=MAX_NAME_LEN) class Meta(BaseModel.Meta): managed = False ================================================ FILE: codex/models/fields.py ================================================ """Custom Django fields.""" from decimal import ROUND_DOWN, Decimal from html import unescape from typing import Any, override from django.db.models.fields import ( CharField, DecimalField, PositiveSmallIntegerField, SmallIntegerField, TextField, ) from nh3 import clean class CleaningStringFieldMixin: """Sanitizing Mixin for CharField & TextField.""" def get_prep_value(self, value): """Truncate, sanitize 
and unescape.""" if value := super().get_prep_value(value): # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] value = value[: self.max_length] # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] value = clean(value) value = unescape(value) return value class CleaningCharField(CleaningStringFieldMixin, CharField): """Sanitizing Truncating CharField.""" class CleaningTextField(CleaningStringFieldMixin, TextField): """Sanitizing Truncating TextField.""" class CoercingSmallIntegerFieldMixin: """Custom IntegerField Mixin that coerces values into a range.""" COERCE_MIN: int = 2**15 * -1 COERCE_MAX: int = 2**15 - 1 def get_prep_value(self, value): """Coerce int into range before insertion.""" value = super().get_prep_value(value) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if value is not None: value = max(min(value, self.COERCE_MAX), self.COERCE_MIN) return value class CoercingSmallIntegerField(CoercingSmallIntegerFieldMixin, SmallIntegerField): """Custom SmallIntegerField.""" class CoercingPositiveSmallIntegerField( CoercingSmallIntegerFieldMixin, PositiveSmallIntegerField ): """Custom PositiveSmallIntegerField.""" COERCE_MIN: int = 0 class CoercingDecimalField(DecimalField): """Custom DecimalField.""" def __init__(self, *args, **kwargs) -> None: """Init coercing values.""" super().__init__(*args, **kwargs) self._quantize_str = Decimal(f"1e-{self.decimal_places}") self._decimal_max = Decimal(10 ** (self.max_digits - 2) - 1) @override def get_prep_value(self, value) -> Any: """Coerce Decimal.""" prepped_value: Decimal | None = super().get_prep_value(value) if prepped_value is not None: prepped_value = prepped_value.quantize( self._quantize_str, rounding=ROUND_DOWN ) prepped_value = prepped_value.min(self._decimal_max) return prepped_value ================================================ FILE: codex/models/functions.py ================================================ """Custom Django DB functions.""" from typing import override from django.db.models.aggregates import Aggregate from django.db.models.expressions import Func from django.db.models.fields import CharField, FloatField, TextField from django.db.models.fields.json import JSONField from django.db.models.fields.related import OneToOneField from django.db.models.lookups import Lookup from codex.models.fields import CleaningCharField, CleaningTextField class JsonGroupArray(Aggregate): """Sqlite3 JSON_GROUP_ARRAY function.""" allow_distinct = True allow_order_by = True function = "JSON_GROUP_ARRAY" name = "JsonGroupArray" def __init__(self, *args, **kwargs) -> None: """output_field is set in the constructor.""" super().__init__(*args, output_field=JSONField(), **kwargs) class GroupConcat(Aggregate): """Sqlite3 GROUP_CONCAT.""" # Defaults to " " separator which is all I need for now. allow_distinct = True allow_order_by = True function = "GROUP_CONCAT" name = "GroupConcat" def __init__(self, *args, **kwargs) -> None: """output_field is set in the constructor.""" super().__init__(*args, output_field=CharField(), **kwargs) @OneToOneField.register_lookup class FTS5Match(Lookup): """Sqlite3 FTS5 MATCH lookup.""" lookup_name = "match" @override def as_sql(self, compiler, connection) -> tuple: """Generate MATCH sql.""" rhs, rhs_params = self.process_rhs(compiler, connection) # MATCH works on the table itself not the one_to_one rel. 
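        # Illustrative usage (assumed, not shown in this file): a queryset like
        #   ComicFTS.objects.filter(comic__match='"dark phoenix"')
        # would emit a WHERE clause of roughly
        #   codex_comicfts MATCH '"dark phoenix"'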
        # Force the table name without substitutions by the optimizer.
        sql = "codex_comicfts MATCH " + rhs
        params = rhs_params
        return sql, params


@CharField.register_lookup
@TextField.register_lookup
@CleaningCharField.register_lookup
@CleaningTextField.register_lookup
class Like(Lookup):
    """SQL LIKE lookup."""

    lookup_name = "like"
    prepare_rhs = False

    @override
    def as_sql(self, compiler, connection) -> tuple:
        """Generate LIKE sql."""
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        params = lhs_params + rhs_params
        sql = f"{lhs} LIKE {rhs}"
        return sql, params


class ComicFTSRank(Func):
    """Sqlite3 FTS5 inverse rank function."""

    function = "rank"
    template = '("codex_comicfts"."rank" * -1)'

    def __init__(self, *args, **kwargs) -> None:
        """output_field is set in the constructor."""
        super().__init__(*args, output_field=FloatField(), **kwargs)


================================================ FILE: codex/models/groups.py ================================================
"""Browser Group models."""

from typing import override

from django.db.models import CASCADE, SET_DEFAULT, ForeignKey

from codex.models.base import MAX_NAME_LEN, BaseModel
from codex.models.fields import CleaningCharField, CoercingPositiveSmallIntegerField
from codex.models.identifier import Identifier
from codex.models.paths import CustomCover, WatchedPath
from codex.models.util import get_sort_name

__all__ = (
    "BrowserGroupModel",
    "Folder",
    "IdentifiedBrowserGroupModel",
    "Imprint",
    "Publisher",
    "Series",
    "Volume",
)


class BrowserGroupModel(BaseModel):
    """Browser groups."""

    DEFAULT_NAME: str | None = ""
    PARENT: str = ""

    name = CleaningCharField(
        db_index=True, max_length=MAX_NAME_LEN, default=DEFAULT_NAME
    )
    sort_name = CleaningCharField(
        db_index=True,
        max_length=MAX_NAME_LEN,
        default=DEFAULT_NAME,
        db_collation="nocase",
    )
    custom_cover: ForeignKey | None = ForeignKey(
        CustomCover, on_delete=SET_DEFAULT, null=True, default=None
    )

    def set_sort_name(self) -> None:
        """Create sort_name for model."""
        self.sort_name = get_sort_name(self.name)

    @override
    def presave(self) -> None:
        """Set computed values."""
        self.set_sort_name()

    @override
    def save(self, *args, **kwargs) -> None:
        """Save computed fields."""
        self.presave()
        super().save(*args, **kwargs)

    class Meta(BaseModel.Meta):
        """Without this a real table is created and joined to."""

        abstract = True

    def _repr_parts(self) -> tuple[str, ...]:
        return (self.name,)

    @override
    def __repr__(self) -> str:
        """Represent as string."""
        return "/".join(self._repr_parts())


class IdentifiedBrowserGroupModel(BrowserGroupModel):
    """
    Identified Browser Group Model.

    Comicbox objects can have multiple identifiers, but if I let BrowserGroups
    have them then it would be impossible to unlink a second level m2m
    relationship when comics are deleted. So I choose the highest priority one
    on import. Additionally, Browser groups will update to the highest priority
    identifier by source instead of creating duplicate groups to keep the
    hierarchy consolidated.
""" identifier = ForeignKey(Identifier, on_delete=CASCADE, null=True) class Meta(BrowserGroupModel.Meta): """Without this a real table is created and joined to.""" abstract = True class Publisher(IdentifiedBrowserGroupModel): """The publisher of the comic.""" class Meta(IdentifiedBrowserGroupModel.Meta): """Constraints.""" unique_together = ("name",) class Imprint(IdentifiedBrowserGroupModel): """A Publishing imprint.""" PARENT: str = "publisher" publisher = ForeignKey(Publisher, on_delete=CASCADE) class Meta(IdentifiedBrowserGroupModel.Meta): """Constraints.""" unique_together = ("publisher", "name") @override def _repr_parts(self) -> tuple: return (self.publisher.name, self.name) class Series(IdentifiedBrowserGroupModel): """The series the comic belongs to.""" PARENT: str = "imprint" publisher = ForeignKey(Publisher, on_delete=CASCADE) imprint = ForeignKey(Imprint, on_delete=CASCADE) volume_count = CoercingPositiveSmallIntegerField(null=True) class Meta(IdentifiedBrowserGroupModel.Meta): """Constraints.""" unique_together = ("imprint", "name") verbose_name_plural = "Series" @override def _repr_parts(self) -> tuple: return ( self.publisher.name, self.imprint.name, self.name, ) class Volume(BrowserGroupModel): """The volume of the series the comic belongs to.""" DEFAULT_NAME: str | None = None PARENT: str = "series" YEAR_LEN = 4 publisher = ForeignKey(Publisher, on_delete=CASCADE) imprint = ForeignKey(Imprint, on_delete=CASCADE) series = ForeignKey(Series, on_delete=CASCADE) issue_count = CoercingPositiveSmallIntegerField(null=True) name = CoercingPositiveSmallIntegerField( # pyright: ignore[reportIncompatibleUnannotatedOverride] db_index=True, null=True, default=DEFAULT_NAME ) number_to = CoercingPositiveSmallIntegerField( db_index=True, null=True, default=DEFAULT_NAME ) # Harmful because name is numeric sort_name = None # pyright: ignore[reportIncompatibleUnannotatedOverride] custom_cover = None @override def set_sort_name(self): """Noop.""" class Meta(BrowserGroupModel.Meta): """Constraints.""" unique_together = ("series", "name", "number_to") @classmethod def to_str(cls, number: int | None, number_to: int | None) -> str: """Represent volume as a string.""" if number is None: rep = "" else: number_str = str(number) is_year = len(number_str) == cls.YEAR_LEN if number_to is not None: numbers_strs = (number_str, str(number_to)) numbers_str = "-".join(numbers_strs) else: numbers_str = number_str rep = f"({numbers_str})" if is_year else f"v{numbers_str}" return rep @override def _repr_parts(self) -> tuple: """Represent volume as a string.""" return ( self.publisher.name, self.imprint.name, self.series.name, self.to_str(self.name, self.number_to), ) class WatchedPathBrowserGroup(BrowserGroupModel, WatchedPath): """Watched Path Browser Group.""" @override def presave(self) -> None: """Fix multiple inheritance presave.""" super().presave() WatchedPath.presave(self) class Meta(BrowserGroupModel.Meta, WatchedPath.Meta): """Use Mixin Meta.""" abstract = True class Folder(WatchedPathBrowserGroup): """File system folder.""" ================================================ FILE: codex/models/identifier.py ================================================ """Identifier Models.""" from typing import override from django.db.models import ( CASCADE, CharField, ForeignKey, TextChoices, URLField, ) from codex.models.base import MAX_NAME_LEN, BaseModel, NamedModel from codex.models.fields import CleaningCharField __all__ = ("Identifier", "IdentifierSource", "NamedModel") _IDENTIFIER_TYPE_MAX_LENGTH = 16 
class IdentifierSource(NamedModel):
    """An Identifier's source."""


class IdentifierType(TextChoices):
    """
    The identifier type for the source.

    Values are table names.
    """

    ARC = "storyarc"
    CHARACTER = "character"
    GENRE = "genre"
    IMPRINT = "imprint"
    ISSUE = "comic"
    LOCATION = "location"
    PUBLISHER = "publisher"
    # REPRINT = "reprint" not yet implemented
    SERIES = "series"
    STORY = "story"
    TAG = "tag"
    TEAM = "team"
    UNIVERSE = "universe"
    ROLE = "creditrole"
    CREATOR = "creditperson"


class Identifier(BaseModel):
    """
    A method of identifying the comic.

    The only class with a url.
    """

    source = ForeignKey(IdentifierSource, db_index=True, on_delete=CASCADE, null=True)
    id_type = CharField(
        choices=IdentifierType.choices,
        db_index=True,
        max_length=_IDENTIFIER_TYPE_MAX_LENGTH,
    )
    key = CleaningCharField(max_length=MAX_NAME_LEN)
    url = URLField(default="")

    class Meta(BaseModel.Meta):
        """Declare constraints and indexes."""

        unique_together: tuple[str, ...] = ("source", "id_type", "key")

    @property
    def name(self) -> str:
        """Provide a urn-like name to imitate a NamedModel."""
        # Skip the source segment entirely when source is unset so the join
        # produces no empty or doubled colons.
        parts = (
            (self.source.name, self.id_type, self.key)
            if self.source
            else (self.id_type, self.key)
        )
        return ":".join(parts)

    @override
    def __repr__(self) -> str:
        """Represent as a string."""
        return self.name + ":" + self.url


================================================ FILE: codex/models/library.py ================================================
"""Library model."""

from datetime import timedelta
from pathlib import Path
from types import MappingProxyType
from typing import override

from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db.models import (
    BooleanField,
    CharField,
    DateTimeField,
    DurationField,
    ManyToManyField,
)
from django.utils.translation import gettext_lazy as _

from codex.models.base import MAX_PATH_LEN, BaseModel

__all__ = ("Library", "validate_dir_exists")


def validate_dir_exists(path) -> None:
    """Validate that a library exists."""
    if not Path(path).is_dir():
        raise ValidationError(_("{path} is not a directory"), params={"path": path})


class Library(BaseModel):
    """The library comic files live under."""

    DEFAULT_POLL_EVERY_SECONDS = 60 * 60
    DEFAULT_POLL_EVERY = timedelta(seconds=DEFAULT_POLL_EVERY_SECONDS)
    CUSTOM_COVERS_DIR_DEFAULTS = MappingProxyType(
        {
            "covers_only": True,
            "events": False,
            "poll": False,
        }
    )

    covers_only = BooleanField(db_index=True, default=False)
    path = CharField(
        unique=True,
        db_index=True,
        max_length=MAX_PATH_LEN,
        validators=[validate_dir_exists],
    )
    events = BooleanField(db_index=True, default=True)
    poll = BooleanField(db_index=True, default=True)
    poll_every = DurationField(default=DEFAULT_POLL_EVERY)
    last_poll = DateTimeField(null=True)
    update_in_progress = BooleanField(default=False)
    groups = ManyToManyField(Group, blank=True)

    @override
    def __repr__(self) -> str:
        """Return the path."""
        return str(self.path)

    class Meta(BaseModel.Meta):
        """Pluralize."""

        verbose_name_plural = "Libraries"

    def _save_update_in_progress(self, *, value: bool) -> None:
        self.update_in_progress = value
        self.save(update_fields=["update_in_progress"])

    def start_update(self) -> None:
        """Start a library update."""
        self._save_update_in_progress(value=True)

    def end_update(self) -> None:
        """Finish a library update."""
        self._save_update_in_progress(value=False)


================================================ FILE: codex/models/named.py ================================================
"""Named models."""

from typing import override

from django.db.models import (
    CASCADE,
    ForeignKey,
)

from codex.models.base import MAX_NAME_LEN, BaseModel, NamedModel
from codex.models.fields import CleaningCharField, CoercingPositiveSmallIntegerField
from codex.models.groups import BrowserGroupModel
from codex.models.identifier import Identifier

__all__ = (
    "AgeRating",
    "Character",
    "Country",
    "Credit",
    "CreditPerson",
    "CreditRole",
    "Genre",
    "Language",
    "Location",
    "OriginalFormat",
    "ScanInfo",
    "SeriesGroup",
    "Story",
    "StoryArc",
    "StoryArcNumber",
    "Tag",
    "Tagger",
    "Team",
    "Universe",
)


class IdentifiedNamedModel(NamedModel):
    """
    For NamedModels with Identifiers.

    Comicbox objects can have multiple identifiers, but if I let NamedModels
    have them then it would be impossible to unlink a second level m2m
    relationship when comics are deleted. So I choose the highest priority
    one on import.
    """

    identifier = ForeignKey(Identifier, on_delete=CASCADE, null=True)

    class Meta(NamedModel.Meta):
        """Defaults to uniquely named, must be overridden."""

        abstract = True

    @override
    def __repr__(self) -> str:
        """Return the name."""
        suffix = ":" + str(self.identifier) if self.identifier else ""
        return self.name + suffix


class AgeRating(NamedModel):
    """The Age Rating the comic was intended for."""


class Character(IdentifiedNamedModel):
    """A character that appears in the comic."""


class CreditPerson(IdentifiedNamedModel):
    """Credited persons."""


class CreditRole(IdentifiedNamedModel):
    """A role for the credited person. Writer, Inker, etc."""


class Credit(BaseModel):
    """A credit."""

    person = ForeignKey(CreditPerson, on_delete=CASCADE)
    role = ForeignKey(CreditRole, on_delete=CASCADE, null=True)

    class Meta(BaseModel.Meta):
        """Constraints."""

        unique_together = ("person", "role")

    @override
    def __repr__(self) -> str:
        """Return the strings of parts."""
        return str(self.person) + ":" + str(self.role)


class Country(NamedModel):
    """The two letter country code."""

    class Meta(NamedModel.Meta):
        """Constraints."""

        verbose_name_plural = "Countries"


class Genre(IdentifiedNamedModel):
    """The genre the comic belongs to."""


class Language(NamedModel):
    """The two letter language code."""


class Location(IdentifiedNamedModel):
    """A location that appears in the comic."""


class OriginalFormat(NamedModel):
    """The original published format."""


class ScanInfo(NamedModel):
    """Whoever scanned the comic."""


class SeriesGroup(NamedModel):
    """A series group the series is part of."""


class Story(IdentifiedNamedModel):
    """A story in a comic."""

    class Meta(IdentifiedNamedModel.Meta):
        """Constraints."""

        verbose_name_plural = "Stories"


class StoryArc(IdentifiedNamedModel, BrowserGroupModel):
    """A story arc the comic is part of."""

    class Meta(IdentifiedNamedModel.Meta, BrowserGroupModel.Meta):
        """Fix Meta inheritance."""


class StoryArcNumber(BaseModel):
    """A story arc number the comic represents."""

    story_arc = ForeignKey(StoryArc, db_index=True, on_delete=CASCADE)
    number = CoercingPositiveSmallIntegerField(null=True, default=None)

    class Meta(BaseModel.Meta):
        """Declare constraints and indexes."""

        unique_together = ("story_arc", "number")

    @property
    def name(self):
        """Provide a name to imitate a NamedModel."""
        suffix = f":{self.number}" if self.number is not None else ""
        return self.story_arc.name + suffix


class Tag(IdentifiedNamedModel):
    """Arbitrary Metadata Tag."""


class Tagger(NamedModel):
    """Tagger program."""


class Team(IdentifiedNamedModel):
    """A team that appears in the comic."""


class Universe(IdentifiedNamedModel):
    """Universe the comic appears in."""

    designation = CleaningCharField(max_length=MAX_NAME_LEN)

    @override
    def
__repr__(self) -> str: """Provide a name to imitate a NamedModel.""" name = self.name + ":" + str(self.designation) if self.identifier: name += ":" + str(self.identifier) return name ================================================ FILE: codex/models/paths.py ================================================ """Watched Path models.""" from pathlib import Path from types import MappingProxyType from typing import override from django.db.models import CASCADE, CharField, ForeignKey, JSONField, TextChoices from codex.models.base import MAX_NAME_LEN, MAX_PATH_LEN, BaseModel from codex.models.choices import max_choices_len from codex.models.library import Library from codex.models.util import get_sort_name __all__ = ("CustomCover", "FailedImport") class WatchedPath(BaseModel): """A filesystem path with data for Watcher diffs.""" library = ForeignKey(Library, on_delete=CASCADE, db_index=True) parent_folder: ForeignKey | None = ForeignKey( "Folder", on_delete=CASCADE, null=True, ) path = CharField(max_length=MAX_PATH_LEN, db_index=True) stat = JSONField(null=True) ZERO_STAT = (0, 0, 0, 0, 0, 0, 0, 0, 0.0, 0) def set_stat(self) -> None: """Set select stat params from the filesystem.""" path = Path(str(self.path)) st_record = path.stat() # Converting os.stat directly to a list or tuple saves # mtime as an int and causes problems. st = list(self.ZERO_STAT) st[0] = st_record.st_mode st[1] = st_record.st_ino # st[2] = st_record.st_dev is ignored by diff # st[3] = st_record.st_nlink ignored # st[4] = st_record.st_uid ignored # st[5] = st_record.st_gid ignored st[6] = st_record.st_size # st[7] = st_record.st_atime ignored st[8] = st_record.st_mtime self.stat = st @override def presave(self) -> None: """Save stat.""" self.set_stat() @override def __repr__(self) -> str: """Return the full path.""" return str(self.path) class Meta(BaseModel.Meta): """Use Mixin Meta.""" unique_together = ("library", "path") abstract = True def search_path(self) -> str: """Relative path for search index.""" return self.path.removeprefix(self.library.path) class FailedImport(WatchedPath): """Failed Comic Imports. 
    Displayed in Admin Panel.
    """

    name = CharField(db_index=True, max_length=MAX_NAME_LEN, default="")

    def set_reason(self, exc) -> None:
        """Can't do this in save() because it breaks update_or_create."""
        reason = str(exc)
        suffixes = (f": {self.path}", f": {self.path!r}")
        for suffix in suffixes:
            reason = reason.removesuffix(suffix)
        reason = reason[:MAX_NAME_LEN]
        self.name = reason.strip()


class CustomCover(WatchedPath):
    """Custom Cover Image."""

    class GroupChoices(TextChoices):
        """Browser group choices for custom covers."""

        P = "p"
        I = "i"  # noqa: E741
        S = "s"
        # no V
        A = "a"
        F = "f"

    FOLDER_COVER_STEM = ".codex-cover"
    DIR_GROUP_CHOICE_MAP = MappingProxyType(
        {
            "publishers": GroupChoices.P.value,
            "imprints": GroupChoices.I.value,
            "series": GroupChoices.S.value,
            "story-arcs": GroupChoices.A.value,
        }
    )

    parent_folder: ForeignKey | None = None
    group = CharField(
        max_length=max_choices_len(GroupChoices),
        db_index=True,
        choices=GroupChoices.choices,
    )
    sort_name = CharField(
        max_length=MAX_NAME_LEN, db_index=True, default="", db_collation="nocase"
    )

    def _set_group_and_sort_name(self) -> None:
        """Set group and sort_name from path."""
        path = Path(self.path)
        stem = path.stem
        if stem == self.FOLDER_COVER_STEM:
            group = self.GroupChoices.F.value
        else:
            group = self.DIR_GROUP_CHOICE_MAP[path.parent.name]
        self.sort_name = get_sort_name(stem)
        self.group = group

    @override
    def presave(self) -> None:
        """Presave group and sort_name."""
        super().presave()
        self._set_group_and_sort_name()


================================================ FILE: codex/models/query.py ================================================
"""Custom Compiler to force group_by."""

# If any group_by() is attached to the QuerySet, it completely overrides the
# compiler computed group_by.

from typing import Self, override

from django.db import connections
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.db.models.sql.compiler import SQLCompiler
from django.db.models.sql.query import Query


class GroupBySQLCompiler(SQLCompiler):
    """Custom Compiler to force group_by."""

    def __init__(self, *args, **kwargs) -> None:
        """Initialize force group_by fields."""
        super().__init__(*args, **kwargs)
        self.force_group_by_table = ""
        self.force_group_by_fields = ()

    def set_force_group_by(self, table, fields) -> None:
        """Set the force group_by variables."""
        self.force_group_by_table = table
        self.force_group_by_fields = fields

    @override
    def get_group_by(self, *args, **kwargs) -> list:
        """If force group_by set, force it."""
        if self.force_group_by_table and self.force_group_by_fields:
            table = self.force_group_by_table
            group_by = []
            for field in self.force_group_by_fields:
                field_str = f'"{table}"."{field}"'
                entry = (field_str, ())
                group_by.append(entry)
        else:
            group_by = super().get_group_by(*args, **kwargs)
        return group_by


class GroupByQuery(Query):
    """Custom Query to use GroupBy Compiler."""

    def __init__(self, *args, **kwargs) -> None:
        """Init force group_by fields."""
        super().__init__(*args, **kwargs)
        self.force_group_by_table = ""
        self.force_group_by_fields = ()

    @override
    def get_compiler(
        self, using=None, connection=None, elide_empty=True
    ) -> GroupBySQLCompiler | SQLCompiler:
        """Use the custom compiler instead of SQLCompiler."""
        if self.compiler == "SQLCompiler":
            if using is None and connection is None:
                reason = "Need either using or connection"
                raise ValueError(reason)
            if using:
                connection = connections[using]
            compiler = GroupBySQLCompiler(self, connection, using, elide_empty)
            compiler.set_force_group_by(
                self.force_group_by_table,
self.force_group_by_fields ) else: compiler = super().get_compiler( using=using, connection=connection, elide_empty=elide_empty, # pyright: ignore[reportCallIssue], # ty: ignore[unknown-argument] ) return compiler def set_force_group_by(self, fields, model=None) -> None: """Set the force group_by fields.""" if not model: model = self.model table = model._meta.db_table if model else "" self.force_group_by_table = table self.force_group_by_fields = fields class GroupByQuerySet(QuerySet): """Custom Queryset that uses Custom compiler.""" def __init__(self, model=None, query=None, using=None, hints=None) -> None: """Use the custom query with the custom compiler.""" query = query or GroupByQuery(model) super().__init__(model=model, query=query, using=using, hints=hints) def group_by(self, *fields, model=None) -> Self: """Force group_by operator.""" obj = self._chain() # pyright: ignore[reportAttributeAccessIssue] obj.query.set_force_group_by(fields, model=model) return obj def demote_joins(self, tables) -> Self: """Force INNER JOINS.""" obj = self._chain() # pyright: ignore[reportAttributeAccessIssue] if valid_tables := tables & set(obj.query.alias_map): obj.query.demote_joins(valid_tables) return obj class GroupByManager(Manager.from_queryset(GroupByQuerySet)): # ty: ignore[unsupported-base] """Use GroupBy QuerySet.""" ================================================ FILE: codex/models/settings.py ================================================ """User/session settings models.""" from typing import override from django.conf import settings from django.contrib.sessions.models import Session from django.db.models import ( CASCADE, PROTECT, SET_NULL, BooleanField, CharField, CheckConstraint, ForeignKey, JSONField, OneToOneField, PositiveSmallIntegerField, Q, TextChoices, UniqueConstraint, ) from codex.choices.browser import ( BROWSER_BOOKMARK_FILTER_CHOICES, BROWSER_ORDER_BY_CHOICES, BROWSER_ROUTE_CHOICES, BROWSER_TOP_GROUP_CHOICES, ) from codex.models.base import MAX_NAME_LEN, BaseModel from codex.models.choices import ReadingDirectionChoices, max_choices_len __all__ = ( "SettingsBrowser", "SettingsBrowserFilters", "SettingsBrowserLastRoute", "SettingsBrowserShow", "SettingsReader", ) # Custom on_delete handlers # # Django's migration serializer resolves on_delete by module path + function # name, so these must be proper top-level functions — not closures. def cascade_if_session_null( collector, field, sub_objs, using, # noqa: ARG001 ): """ Cascade delete only when the session FK is also null. Used as on_delete for the user FK. When a user is deleted: - Rows where session is also null are orphaned to cascade delete. - Rows where session is set to just null out the user FK. """ orphans = [obj for obj in sub_objs if obj.session_id is None] if orphans: collector.collect( orphans, source=field.remote_field.model, source_attr=field.name, nullable=field.null, ) if field.null: collector.add_field_update(field, None, sub_objs) def cascade_if_user_null( collector, field, sub_objs, using, # noqa: ARG001 ): """ Cascade delete only when the user FK is also null. Used as on_delete for the session FK. When a session is deleted: - Rows where user is also null are orphaned to cascade delete. - Rows where user is set to just null out the session FK. 
""" orphans = [obj for obj in sub_objs if obj.user_id is None] if orphans: collector.collect( orphans, source=field.remote_field.model, source_attr=field.name, nullable=field.null, ) if field.null: collector.add_field_update(field, None, sub_objs) # Shared choices class ClientChoices(TextChoices): """API vs OPDS client type.""" API = "api", "API" OPDS = "opds", "OPDS" class FitToChoices(TextChoices): """Reader fit-to choices.""" SCREEN = "S" WIDTH = "W" HEIGHT = "H" ORIG = "O" ################# # Abstract base # ################# class SettingsBase(BaseModel): """Abstract base for per-user / per-session settings.""" client = CharField( max_length=max_choices_len(ClientChoices), choices=ClientChoices.choices, default=ClientChoices.API, db_index=True, ) user = ForeignKey( settings.AUTH_USER_MODEL, db_index=True, on_delete=cascade_if_session_null, null=True, blank=True, ) session = ForeignKey( Session, db_index=True, on_delete=cascade_if_user_null, null=True, blank=True, ) class Meta(BaseModel.Meta): """Abstract base settings.""" abstract = True @override def __repr__(self) -> str: return ( f"<{self.__class__.__name__}" f" client={self.client}" f" user={self.user!r}" f" session={self.session!r}>" ) ##################################### # Browser Settings — related models # ##################################### class SettingsBrowserShow(BaseModel): """ Show-group boolean grid. Shared across SettingsBrowser rows — created but never deleted. With 4 booleans there are at most 16 distinct rows (in practice ~6). """ p = BooleanField(default=True) i = BooleanField(default=False) s = BooleanField(default=True) v = BooleanField(default=False) class Meta(BaseModel.Meta): """Browser show-flag settings.""" verbose_name_plural = "browser show settings" constraints = ( UniqueConstraint( fields=("p", "i", "s", "v"), name="unique_settingsbrowsershow_flags", ), ) @override def __repr__(self) -> str: return f"" class SettingsBrowserFilters(BaseModel): """ Filter columns for a single SettingsBrowser row. One-to-one with SettingsBrowser — created and deleted together. """ browser = OneToOneField( "codex.SettingsBrowser", on_delete=CASCADE, related_name="filters", ) # Bookmark filter (choice, not a list of ints) bookmark = CharField( max_length=16, choices=tuple(BROWSER_BOOKMARK_FILTER_CHOICES.items()), default="", blank=True, ) # Dynamic filters — each stores a list of ints. 
    age_rating = JSONField(default=list)
    characters = JSONField(default=list)
    country = JSONField(default=list)
    credits = JSONField(default=list)
    critical_rating = JSONField(default=list)
    decade = JSONField(default=list)
    file_type = JSONField(default=list)
    genres = JSONField(default=list)
    identifier_source = JSONField(default=list)
    language = JSONField(default=list)
    locations = JSONField(default=list)
    monochrome = JSONField(default=list)
    original_format = JSONField(default=list)
    reading_direction = JSONField(default=list)
    series_groups = JSONField(default=list)
    stories = JSONField(default=list)
    story_arcs = JSONField(default=list)
    tagger = JSONField(default=list)
    tags = JSONField(default=list)
    teams = JSONField(default=list)
    universes = JSONField(default=list)
    year = JSONField(default=list)

    FILTER_KEYS = frozenset(
        {
            "bookmark",
            "age_rating",
            "characters",
            "country",
            "credits",
            "critical_rating",
            "decade",
            "file_type",
            "genres",
            "identifier_source",
            "language",
            "locations",
            "monochrome",
            "original_format",
            "reading_direction",
            "series_groups",
            "stories",
            "story_arcs",
            "tagger",
            "tags",
            "teams",
            "universes",
            "year",
        }
    )

    class Meta(BaseModel.Meta):
        """Browser filter settings."""

        verbose_name_plural = "browser filter settings"

    @override
    def __repr__(self) -> str:
        return f"<SettingsBrowserFilters browser={self.browser_id}>"


class SettingsBrowserLastRoute(BaseModel):
    """
    Last-route columns for a single SettingsBrowser row.

    One-to-one with SettingsBrowser — created and deleted together.
    """

    browser = OneToOneField(
        "codex.SettingsBrowser",
        on_delete=CASCADE,
        related_name="last_route",
    )
    group = CharField(
        max_length=1,
        choices=tuple(BROWSER_ROUTE_CHOICES.items()),
        default="r",
    )
    pks = JSONField(default=list)
    page = PositiveSmallIntegerField(default=1)

    class Meta(BaseModel.Meta):
        """Browser last-route settings."""

        verbose_name_plural = "browser last-route settings"

    @override
    def __repr__(self) -> str:
        return (
            f"<SettingsBrowserLastRoute"
            f" group={self.group} pks={self.pks} page={self.page}>"
        )


####################
# Browser Settings #
####################


class SettingsBrowser(SettingsBase):
    """Persisted browser settings."""

    name = CharField(max_length=MAX_NAME_LEN, default="", blank=True, db_index=True)

    # Browse state
    top_group = CharField(
        max_length=1,
        choices=tuple(BROWSER_TOP_GROUP_CHOICES.items()),
        default="p",
    )
    order_by = CharField(
        max_length=32,
        choices=tuple(BROWSER_ORDER_BY_CHOICES.items()),
        default="",
    )
    order_reverse = BooleanField(default=False)
    search = CharField(max_length=4095, default="", blank=True)

    # Display preferences
    custom_covers = BooleanField(default=True)
    dynamic_covers = BooleanField(default=True)
    twenty_four_hour_time = BooleanField(default=False)
    always_show_filename = BooleanField(default=False)

    # FK to shared show-flags row.
    show = ForeignKey(
        SettingsBrowserShow,
        on_delete=PROTECT,
        related_name="+",
    )

    # filters and last_route live in their own tables,
    # linked back by OneToOneField with related_name="filters" / "last_route".
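    # A minimal creation sketch (assumed flow, not code from this repo):
    #   show, _ = SettingsBrowserShow.objects.get_or_create(
    #       p=True, i=False, s=True, v=False
    #   )
    #   browser = SettingsBrowser.objects.create(session=session, show=show)
    #   SettingsBrowserFilters.objects.create(browser=browser)
    #   SettingsBrowserLastRoute.objects.create(browser=browser)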
DIRECT_KEYS = frozenset( { "top_group", "order_by", "order_reverse", "search", "custom_covers", "dynamic_covers", "twenty_four_hour_time", "always_show_filename", } ) class Meta(SettingsBase.Meta): """Browser settings constraints.""" verbose_name_plural = "browser settings" constraints = ( UniqueConstraint( fields=("user", "client", "name"), condition=Q(user__isnull=False), name="unique_settingsbrowser_user", ), UniqueConstraint( fields=("session", "client", "name"), condition=Q(session__isnull=False), name="unique_settingsbrowser_session", ), ) ################### # Reader Settings # ################### _READER_GLOBAL_SCOPE = Q( comic__isnull=True, series__isnull=True, folder__isnull=True, story_arc__isnull=True, ) class SettingsReader(SettingsBase): """ Persisted reader settings. Scope is determined by which FK is set: - comic set per-comic settings (replaces old Bookmark model) - series set per-series settings - folder set per-folder settings - story_arc set per-story-arc settings - none set global reader defaults for the user/session """ # Scope FKs — use string references to avoid circular imports. comic = ForeignKey( "codex.Comic", db_index=True, on_delete=CASCADE, null=True, blank=True ) series = ForeignKey( "codex.Series", db_index=True, on_delete=CASCADE, null=True, blank=True ) folder = ForeignKey( "codex.Folder", db_index=True, on_delete=SET_NULL, null=True, blank=True ) story_arc = ForeignKey( "codex.StoryArc", db_index=True, on_delete=CASCADE, null=True, blank=True ) # Reader display settings fit_to = CharField( blank=True, choices=FitToChoices.choices, default="", max_length=max_choices_len(FitToChoices), ) two_pages = BooleanField(default=None, null=True) reading_direction = CharField( blank=True, choices=ReadingDirectionChoices.choices, default="", max_length=max_choices_len(ReadingDirectionChoices), ) read_rtl_in_reverse = BooleanField(default=None, null=True) finish_on_last_page = BooleanField(default=None, null=True) page_transition = BooleanField(default=None, null=True) cache_book = BooleanField(default=None, null=True) # Dict keys that map 1:1 to a model column with the same name. DIRECT_KEYS = frozenset( { "fit_to", "two_pages", "reading_direction", "read_rtl_in_reverse", "finish_on_last_page", "page_transition", "cache_book", } ) class Meta(SettingsBase.Meta): """Reader settings constraints.""" verbose_name_plural = "reader settings" constraints = ( # At most one scope FK may be set. 
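            # The disjunction below enumerates the five legal states: all four
            # scope FKs null (global defaults), or exactly one non-null. It is
            # spelled out longhand because a check constraint has no single
            # operator for "at most one of these columns is set".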
CheckConstraint( condition=( Q( comic__isnull=True, series__isnull=True, folder__isnull=True, story_arc__isnull=True, ) | Q( comic__isnull=False, series__isnull=True, folder__isnull=True, story_arc__isnull=True, ) | Q( comic__isnull=True, series__isnull=False, folder__isnull=True, story_arc__isnull=True, ) | Q( comic__isnull=True, series__isnull=True, folder__isnull=False, story_arc__isnull=True, ) | Q( comic__isnull=True, series__isnull=True, folder__isnull=True, story_arc__isnull=False, ) ), name="settingsreader_scope_xor", ), # ---- Global scope (no comic/series/folder/story_arc) ---- UniqueConstraint( fields=("user", "client"), condition=Q(user__isnull=False) & _READER_GLOBAL_SCOPE, name="unique_settingsreader_user_global", ), UniqueConstraint( fields=("session", "client"), condition=Q(session__isnull=False) & _READER_GLOBAL_SCOPE, name="unique_settingsreader_session_global", ), # ---- Comic scope ---- UniqueConstraint( fields=("user", "client", "comic"), condition=Q(user__isnull=False, comic__isnull=False), name="unique_settingsreader_user_comic", ), UniqueConstraint( fields=("session", "client", "comic"), condition=Q(session__isnull=False, comic__isnull=False), name="unique_settingsreader_session_comic", ), # ---- Series scope ---- UniqueConstraint( fields=("user", "client", "series"), condition=Q(user__isnull=False, series__isnull=False), name="unique_settingsreader_user_series", ), UniqueConstraint( fields=("session", "client", "series"), condition=Q(session__isnull=False, series__isnull=False), name="unique_settingsreader_session_series", ), # ---- Folder scope ---- UniqueConstraint( fields=("user", "client", "folder"), condition=Q(user__isnull=False, folder__isnull=False), name="unique_settingsreader_user_folder", ), UniqueConstraint( fields=("session", "client", "folder"), condition=Q(session__isnull=False, folder__isnull=False), name="unique_settingsreader_session_folder", ), # ---- StoryArc scope ---- UniqueConstraint( fields=("user", "client", "story_arc"), condition=Q(user__isnull=False, story_arc__isnull=False), name="unique_settingsreader_user_story_arc", ), UniqueConstraint( fields=("session", "client", "story_arc"), condition=Q(session__isnull=False, story_arc__isnull=False), name="unique_settingsreader_session_story_arc", ), ) ================================================ FILE: codex/models/util.py ================================================ """Utilities for models.""" _ARTICLES = frozenset( ("a", "an", "the") # en # noqa: RUF005 + ("un", "unos", "unas", "el", "los", "la", "las") # es + ("un", "une", "le", "les", "la", "les", "l'") # fr + ("o", "a", "os") # pt # pt "as" conflicts with English + ("der", "dem", "des", "das") # de # de: "den & die conflict with English + ("il", "lo", "gli", "la", "le", "l'") # it # it: "i" conflicts with English + ("de", "het", "een") # nl + ("en", "ett") # sw + ("en", "ei", "et") # no + ("en", "et") # da + ("el", "la", "els", "les", "un", "una", "uns", "unes", "na") # ct ) def get_sort_name(name: str) -> str: """Create sort_name from name.""" lower_name = name.lower() sort_name = lower_name name_parts = lower_name.split() if len(name_parts) > 1: first_word = name_parts[0] if first_word in _ARTICLES: sort_name = " ".join(name_parts[1:]) sort_name += ", " + first_word return sort_name ================================================ FILE: codex/run.py ================================================ #!/usr/bin/env python3 """The main runnable for codex. 
Sets up codex and runs granian.""" import asyncio from os import execv from django.db import connection from granian.constants import HTTPModes, Interfaces from granian.server.embed import Server from loguru import logger from setproctitle import setproctitle from codex.asgi import application from codex.librarian.librariand import LibrarianDaemon from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.settings import ( DEBUG, GRANIAN_HOST, GRANIAN_HTTP, GRANIAN_PORT, GRANIAN_URL_PATH_PREFIX, GRANIAN_WEBSOCKETS, ) from codex.signals.os_signals import RESTART_EVENT, SHUTDOWN_EVENT from codex.startup import codex_init from codex.startup.loguru import loguru_init from codex.version import PACKAGE_NAME, VERSION from codex.websockets.mp_queue import BROADCAST_QUEUE def codex_startup() -> bool: """Start up codex.""" logger.info(f"Starting Codex v{VERSION}") return codex_init() def _database_checkpoint() -> None: """Write wal to disk and truncate it.""" with connection.cursor() as cursor: cursor.execute("PRAGMA wal_checkpoint(TRUNCATE);") logger.debug("checkpointed and truncated database wal") def restart() -> None: """Restart this process.""" from sys import argv print("Restarting Codex. Hold on to your butts...", flush=True) # noqa: T201 execv(__file__, argv) # noqa: S606 def codex_shutdown() -> None: """Shutdown for codex.""" _database_checkpoint() logger.success("Goodbye.") logger.complete() if RESTART_EVENT.is_set(): restart() def _build_server() -> Server: """ Build the granian embedded server. Note: the embed server runs workers as asyncio tasks (single-worker only), which lets us integrate cleanly with the SHUTDOWN_EVENT lifecycle. """ return Server( application, interface=Interfaces.ASGI, address=GRANIAN_HOST, port=GRANIAN_PORT, websockets=GRANIAN_WEBSOCKETS, http=HTTPModes(GRANIAN_HTTP), url_path_prefix=GRANIAN_URL_PATH_PREFIX, ) async def _watch_for_changes() -> None: """Watch source files and trigger restart on changes.""" from watchfiles import awatch async for _changes in awatch("codex"): logger.info("File changes detected, restarting...") RESTART_EVENT.set() SHUTDOWN_EVENT.set() break async def _serve(server: Server) -> None: """Run granian until SHUTDOWN_EVENT fires, then stop gracefully.""" server_task = asyncio.create_task(server.serve()) if DEBUG: asyncio.create_task(_watch_for_changes()) # noqa: RUF006 await SHUTDOWN_EVENT.wait() server.stop() await server_task def run() -> None: """Run Codex.""" logger.success(f"Running Codex v{VERSION}") librarian = LibrarianDaemon(logger, LIBRARIAN_QUEUE, BROADCAST_QUEUE) librarian.start() server = _build_server() asyncio.run(_serve(server)) librarian.stop() def main() -> None: """Set up and run Codex.""" logger.debug(f"Starting {PACKAGE_NAME}") setproctitle(PACKAGE_NAME) loguru_init() if codex_startup(): run() codex_shutdown() if __name__ == "__main__": main() ================================================ FILE: codex/serializers/README.md ================================================ # serializers Django Rest Framework serializers. [DRF API Guide](https://www.django-rest-framework.org/api-guide/serializers/). 
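
A minimal sketch of the pattern used throughout this package (the serializer
shown is illustrative, not a real one from this repo; `Library` and
`BaseModelSerializer` are real names from the codebase):

```python
from codex.models import Library
from codex.serializers.models.base import BaseModelSerializer


class ExampleLibrarySerializer(BaseModelSerializer):
    """Expose a read-only subset of Library fields (illustrative only)."""

    class Meta(BaseModelSerializer.Meta):
        model = Library
        fields = ("pk", "path")
        read_only_fields = ("pk",)
```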
================================================ FILE: codex/serializers/__init__.py ================================================
"""Django Rest Framework serializers."""


================================================ FILE: codex/serializers/admin/__init__.py ================================================
"""Admin view serializers."""


================================================ FILE: codex/serializers/admin/flags.py ================================================
"""Admin flag serializers."""

from codex.models import AdminFlag
from codex.serializers.models.base import BaseModelSerializer


class AdminFlagSerializer(BaseModelSerializer):
    """Admin Flag Serializer."""

    class Meta(BaseModelSerializer.Meta):
        """Specify Model."""

        model = AdminFlag
        fields = ("key", "on", "value")
        read_only_fields = ("key",)


================================================ FILE: codex/serializers/admin/groups.py ================================================
"""Admin group serializers."""

from typing import Any, override

from django.contrib.auth.models import Group
from rest_framework.serializers import (
    BooleanField,
)

from codex.models.admin import GroupAuth
from codex.serializers.models.base import BaseModelSerializer


class GroupSerializer(BaseModelSerializer):
    """Group Serializer."""

    exclude = BooleanField(default=False, source="groupauth.exclude")

    class Meta(BaseModelSerializer.Meta):
        """Specify Model."""

        model = Group
        fields = ("pk", "name", "library_set", "user_set", "exclude")
        read_only_fields = ("pk",)

    @override
    def update(self, instance, validated_data) -> Any:
        """Update with nested GroupAuth."""
        exclude = validated_data.pop("groupauth", {}).get("exclude")
        if exclude is not None:
            groupauth = GroupAuth.objects.get(group=instance)
            groupauth.exclude = exclude
            groupauth.save()
        return super().update(instance, validated_data)

    @override
    def create(self, validated_data) -> Any:
        """Create with nested GroupAuth."""
        exclude = validated_data.pop("groupauth", {}).get("exclude", False)
        instance = super().create(validated_data)
        GroupAuth.objects.create(group=instance, exclude=exclude)
        return instance


================================================ FILE: codex/serializers/admin/libraries.py ================================================
"""Admin libraries serializers."""

from pathlib import Path

from rest_framework.serializers import (
    BooleanField,
    CharField,
    IntegerField,
    ListField,
    Serializer,
    ValidationError,
)

from codex.models import FailedImport, Library
from codex.serializers.models.base import BaseModelSerializer


class LibrarySerializer(BaseModelSerializer):
    """Library Serializer."""

    comic_count = IntegerField(read_only=True)
    failed_count = IntegerField(read_only=True)

    class Meta(BaseModelSerializer.Meta):
        """Specify Model."""

        model = Library
        fields = (
            "pk",
            "path",
            "events",
            "last_poll",
            "poll",
            "poll_every",
            "groups",
            "covers_only",
            "comic_count",
            "failed_count",
        )
        read_only_fields = (
            "last_poll",
            "pk",
            "covers_only",
            "comic_count",
            "failed_count",
        )

    def validate_path(self, path):
        """Validate new library paths."""
        ppath = Path(path).resolve()
        if not ppath.is_dir():
            reason = "Not a valid folder on this server"
            raise ValidationError(reason)
        existing_path_strs = Library.objects.values_list("path", flat=True)
        for existing_path_str in existing_path_strs:
            existing_path = Path(existing_path_str)
            if existing_path.is_relative_to(ppath):
                reason = "Parent of existing library path"
                raise ValidationError(reason)
            if ppath.is_relative_to(existing_path):
                reason = "Child of existing library path"
                raise
ValidationError(reason) return path class FailedImportSerializer(BaseModelSerializer): """Failed Import Serializer.""" class Meta(BaseModelSerializer.Meta): """Specify Model.""" model = FailedImport fields = ("pk", "path", "created_at") read_only_fields = ("pk", "path", "created_at") class AdminFolderListSerializer(Serializer): """Get a list of dirs.""" root_folder = CharField(read_only=True) folders = ListField(child=CharField(read_only=True)) class AdminFolderSerializer(Serializer): """Validate a dir.""" path = CharField(default=".") show_hidden = BooleanField(default=False) def validate_path(self, path): """Validate the path is an existing directory.""" ppath = Path(path) if not ppath.resolve().is_dir(): reason = "Not a directory" raise ValidationError(reason) return path def validate_show_hidden(self, show_hidden) -> bool: """Snakecase the showHidden field.""" return show_hidden == "true" or self.initial_data.get("showHidden") == "true" ================================================ FILE: codex/serializers/admin/stats.py ================================================ """Admin stats serializers.""" from rest_framework.serializers import ( BooleanField, CharField, IntegerField, Serializer, ) from codex.serializers.fields import ( CountDictField, SerializerChoicesField, StringListMultipleChoiceField, ) FILE_TYPES_CHOICES = ("CBZ", "CBR", "CBT", "PDF", "UNKNOWN") class StatsSystemSerializer(Serializer): """Platform System Information.""" name = CharField(read_only=True, required=False) release = CharField(read_only=True, required=False) class StatsPlatformSerializer(Serializer): """Platform Information.""" docker = BooleanField(read_only=True) machine = CharField(read_only=True) cores = IntegerField(read_only=True) system = StatsSystemSerializer(read_only=True) python_version = CharField(read_only=True) codex_version = CharField(read_only=True) class StatsConfigSerializer(Serializer): """Config Information.""" library_count = IntegerField(required=False, read_only=True) user_anonymous_count = IntegerField(required=False, read_only=True) user_registered_count = IntegerField(required=False, read_only=True) auth_group_count = IntegerField(required=False, read_only=True) # Only for api api_key = CharField(read_only=True, required=False) class StatsSessionsSerializer(Serializer): """Session Settings.""" top_group = CountDictField(required=False, read_only=True) order_by = CountDictField(required=False, read_only=True) dynamic_covers = CountDictField(required=False, read_only=True) finish_on_last_page = CountDictField(required=False, read_only=True) fit_to = CountDictField(required=False, read_only=True) reading_direction = CountDictField(required=False, read_only=True) class StatsGroupSerializer(Serializer): """Group Counts.""" publisher_count = IntegerField(required=False, read_only=True) imprint_count = IntegerField(required=False, read_only=True) series_count = IntegerField(required=False, read_only=True) volume_count = IntegerField(required=False, read_only=True) issue_count = IntegerField(required=False, read_only=True) folder_count = IntegerField(required=False, read_only=True) story_arc_count = IntegerField(required=False, read_only=True) class StatsComicMetadataSerializer(Serializer): """Metadata Counts.""" age_rating_count = IntegerField(required=False, read_only=True) character_count = IntegerField(required=False, read_only=True) credit_count = IntegerField(required=False, read_only=True) credit_person_count = IntegerField(required=False, read_only=True) credit_role_count = 
IntegerField(required=False, read_only=True) country_count = IntegerField(required=False, read_only=True) genre_count = IntegerField(required=False, read_only=True) identifier_count = IntegerField(required=False, read_only=True) identifier_source_count = IntegerField(required=False, read_only=True) language_count = IntegerField(required=False, read_only=True) location_count = IntegerField(required=False, read_only=True) original_format_count = IntegerField(required=False, read_only=True) series_group_count = IntegerField(required=False, read_only=True) scan_info_count = IntegerField(required=False, read_only=True) story_arc_count = IntegerField(required=False) story_arc_number_count = IntegerField(required=False, read_only=True) tag_count = IntegerField(required=False, read_only=True) tagger_count = IntegerField(required=False, read_only=True) team_count = IntegerField(required=False, read_only=True) universe_count = IntegerField(required=False, read_only=True) class StatsSerializer(Serializer): """Admin Stats Tab.""" platform = StatsPlatformSerializer(required=False) config = StatsConfigSerializer(required=False) sessions = StatsSessionsSerializer(required=False) groups = StatsGroupSerializer(required=False) file_types = CountDictField(required=False) metadata = StatsComicMetadataSerializer(required=False) class AdminStatsRequestSerializer(Serializer): """Admin Stats Tab Request.""" platform = SerializerChoicesField( serializer=StatsPlatformSerializer, required=False ) config = SerializerChoicesField(serializer=StatsConfigSerializer, required=False) sessions = SerializerChoicesField( serializer=StatsSessionsSerializer, required=False ) groups = SerializerChoicesField(serializer=StatsGroupSerializer, required=False) file_types = StringListMultipleChoiceField(choices=FILE_TYPES_CHOICES) metadata = SerializerChoicesField( serializer=StatsComicMetadataSerializer, required=False ) class APIKeySerializer(Serializer): """API Key.""" api_key = CharField(source="name", read_only=True) ================================================ FILE: codex/serializers/admin/tasks.py ================================================ """Admin tasks serializers.""" from rest_framework.serializers import ( ChoiceField, IntegerField, Serializer, ) from codex.choices.jobs import ADMIN_JOBS _ADMIN_JOB_CHOICES = tuple( sorted( {item["value"] for group in ADMIN_JOBS["ADMIN_JOBS"] for item in group["jobs"]} | { variant["value"] for group in ADMIN_JOBS["ADMIN_JOBS"] for item in group["jobs"] for variant in item.get("variants", ()) } ) ) class AdminLibrarianTaskSerializer(Serializer): """Get tasks from front end.""" task = ChoiceField(choices=_ADMIN_JOB_CHOICES) library_id = IntegerField(required=False) ================================================ FILE: codex/serializers/admin/users.py ================================================ """User serializers.""" from django.contrib.auth.models import User from rest_framework.serializers import ( CharField, DateTimeField, Serializer, SerializerMetaclass, ) from codex.serializers.models.base import BaseModelSerializer class PasswordSerializerMixin(metaclass=SerializerMetaclass): """Password Serializer Mixin.""" password = CharField(write_only=True) class UserChangePasswordSerializer(Serializer, PasswordSerializerMixin): """Special User Change Password Serializer.""" class UserSerializer(BaseModelSerializer, PasswordSerializerMixin): """User Serializer.""" password = CharField(write_only=True, required=False, allow_blank=True) last_active = DateTimeField( read_only=True, 
source="useractive.updated_at", allow_null=True ) class Meta(BaseModelSerializer.Meta): """Specify Model.""" model = User fields = ( "pk", "username", "password", "groups", "is_staff", "is_active", "last_active", "last_login", "date_joined", ) read_only_fields = ("pk", "last_active", "last_login", "date_joined") ================================================ FILE: codex/serializers/auth.py ================================================ """Codex Auth Serializers.""" from django.contrib.auth.models import User from rest_framework.fields import BooleanField, CharField from rest_framework.serializers import ( Serializer, SerializerMetaclass, SerializerMethodField, ) from codex.choices.admin import AdminFlagChoices from codex.models.admin import AdminFlag from codex.serializers.fields.auth import TimezoneField from codex.serializers.fields.sanitized import SanitizedCharField from codex.serializers.models.base import BaseModelSerializer class UserSerializer(BaseModelSerializer): """Serialize User model for UI.""" _ADMIN_FLAG_KEYS = ( AdminFlagChoices.NON_USERS.value, AdminFlagChoices.REGISTRATION.value, ) admin_flags = SerializerMethodField() def get_admin_flags(self, *_args) -> dict: """Piggyback admin flags on the user object.""" flags = AdminFlag.objects.filter(key__in=self._ADMIN_FLAG_KEYS).values( "name", "on" ) admin_flags = {} for flag in flags: name = flag["name"] key = name[0].lower() + name[1:].replace(" ", "") admin_flags[key] = flag["on"] return admin_flags class Meta(BaseModelSerializer.Meta): """Model spec.""" model = User fields = ( "pk", "username", "is_staff", "admin_flags", ) read_only_fields = fields class TimezoneSerializerMixin(metaclass=SerializerMetaclass): """Serialize Timezone submission from front end.""" timezone = TimezoneField(write_only=True) class TimezoneSerializer(TimezoneSerializerMixin, Serializer): """Serialize Timezone submission from front end.""" class UserCreateSerializer(BaseModelSerializer, TimezoneSerializerMixin): """Serialize registration input for creating users.""" class Meta(BaseModelSerializer.Meta): """Model spec.""" model = User fields = ("username", "password", "timezone") extra_kwargs = {"password": {"write_only": True}} # noqa: RUF012 class UserLoginSerializer(UserCreateSerializer): """Serialize user login input.""" # specify this so it doesn't trigger the username unique constraint. 
class TimezoneSerializerMixin(metaclass=SerializerMetaclass):
    """Serialize Timezone submission from front end."""

    timezone = TimezoneField(write_only=True)


class TimezoneSerializer(TimezoneSerializerMixin, Serializer):
    """Serialize Timezone submission from front end."""


class UserCreateSerializer(BaseModelSerializer, TimezoneSerializerMixin):
    """Serialize registration input for creating users."""

    class Meta(BaseModelSerializer.Meta):
        """Model spec."""

        model = User
        fields = ("username", "password", "timezone")
        extra_kwargs = {"password": {"write_only": True}}  # noqa: RUF012


class UserLoginSerializer(UserCreateSerializer):
    """Serialize user login input."""

    # specify this so it doesn't trigger the username unique constraint.
    username = SanitizedCharField(min_length=2)

    class Meta(UserCreateSerializer.Meta):
        """Explicit meta inheritance required."""


class AuthAdminFlagsSerializer(Serializer):
    """Admin flags related to auth."""

    banner_text = CharField(read_only=True)
    lazy_import_metadata = BooleanField(read_only=True)
    non_users = BooleanField(read_only=True)
    registration = BooleanField(read_only=True)


================================================
FILE: codex/serializers/browser/__init__.py
================================================
"""Browser Serializers."""


================================================
FILE: codex/serializers/browser/choices.py
================================================
"""Browser Choices Serializer Map."""

from types import MappingProxyType

from rest_framework.fields import (
    BooleanField,
    CharField,
    SerializerMethodField,
)
from rest_framework.serializers import Serializer

from codex.serializers.fields import (
    VuetifyBooleanField,
    VuetifyCharField,
    VuetifyDecadeField,
    VuetifyDecimalField,
    VuetifyIntegerField,
)
from codex.serializers.fields.browser import BookmarkFilterField
from codex.serializers.fields.vuetify import (
    VuetifyFileTypeChoiceField,
    VuetifyReadingDirectionChoiceField,
    VuetifyReadOnlyListField,
)
from codex.serializers.models.pycountry import CountrySerializer, LanguageSerializer


class BrowserFilterChoicesSerializer(Serializer):
    """All dynamic filters."""

    age_rating = BooleanField(read_only=True)
    characters = BooleanField(read_only=True)
    country = BooleanField(read_only=True)
    critical_rating = BooleanField(read_only=True)
    credits = BooleanField(read_only=True)
    decade = BooleanField(read_only=True)
    genres = BooleanField(read_only=True)
    file_type = BooleanField(read_only=True)
    identifier_source = BooleanField(read_only=True)
    monochrome = BooleanField(read_only=True)
    language = BooleanField(read_only=True)
    locations = BooleanField(read_only=True)
    original_format = BooleanField(read_only=True)
    reading_direction = BooleanField(read_only=True)
    series_groups = BooleanField(read_only=True)
    stories = BooleanField(read_only=True)
    story_arcs = BooleanField(read_only=True)
    tagger = BooleanField(read_only=True)
    tags = BooleanField(read_only=True)
    teams = BooleanField(read_only=True)
    universes = BooleanField(read_only=True)
    year = BooleanField(read_only=True)


class BrowserSettingsFilterSerializer(Serializer):
    """Filter values for settings."""

    bookmark = BookmarkFilterField(required=False, read_only=True)
    # Dynamic filters
    age_rating = VuetifyReadOnlyListField()
    characters = VuetifyReadOnlyListField()
    country = VuetifyReadOnlyListField()
    credits = VuetifyReadOnlyListField()
    critical_rating = VuetifyReadOnlyListField(
        child=VuetifyDecimalField(max_digits=5, decimal_places=2)
    )
    decade = VuetifyReadOnlyListField(child=VuetifyDecadeField)
    file_type = VuetifyReadOnlyListField(child=VuetifyFileTypeChoiceField)
    genres = VuetifyReadOnlyListField()
    identifier_source = VuetifyReadOnlyListField()
    language = VuetifyReadOnlyListField()
    locations = VuetifyReadOnlyListField()
    monochrome = VuetifyReadOnlyListField(child=VuetifyBooleanField)
    original_format = VuetifyReadOnlyListField()
    reading_direction = VuetifyReadOnlyListField(
        child=VuetifyReadingDirectionChoiceField
    )
    series_groups = VuetifyReadOnlyListField()
    stories = VuetifyReadOnlyListField()
    story_arcs = VuetifyReadOnlyListField()
    tagger = VuetifyReadOnlyListField()
    tags = VuetifyReadOnlyListField()
    teams = VuetifyReadOnlyListField()
    universes = VuetifyReadOnlyListField()
    year = VuetifyReadOnlyListField()
class BrowserChoicesIntegerPkSerializer(Serializer):
    """Named Model Serializer."""

    pk = VuetifyIntegerField(read_only=True)
    name = CharField(read_only=True)


class BrowserChoicesUniversePkSerializer(Serializer):
    """Universes Only."""

    designation = CharField(read_only=True)


class BrowserChoicesCharPkSerializer(BrowserChoicesIntegerPkSerializer):
    """Named Model Serializer with pk = char hack for languages & countries."""

    pk = VuetifyCharField(read_only=True)


class BrowserChoicesDecimalPkSerializer(BrowserChoicesIntegerPkSerializer):
    """Named Model Serializer with pk = decimal hack for critical ratings."""

    pk = VuetifyDecimalField(max_digits=5, decimal_places=2, read_only=True)


_CHOICES_NAME_SERIALIZER_MAP = MappingProxyType(
    {
        "bookmark": BrowserChoicesCharPkSerializer,
        "country": CountrySerializer,
        "critical_rating": BrowserChoicesDecimalPkSerializer,
        "file_type": BrowserChoicesCharPkSerializer,
        "language": LanguageSerializer,
        "universe": BrowserChoicesUniversePkSerializer,
    }
)
_LIST_FIELDS = frozenset({"decade", "monochrome", "reading_direction", "year"})


class BrowserChoicesFilterSerializer(Serializer):
    """Dynamic Serializer response by field type."""

    choices = SerializerMethodField(read_only=True)

    def get_choices(self, obj) -> list:
        """Dynamic Serializer response by field type."""
        field_name = obj.get("field_name", "")
        choices = obj.get("choices", [])
        serializer_class = _CHOICES_NAME_SERIALIZER_MAP.get(field_name)
        value: list
        if serializer_class:
            value = serializer_class(choices, many=True).data
        elif not serializer_class and field_name in _LIST_FIELDS:
            field = BrowserSettingsFilterSerializer().get_fields().get(field_name)
            value = field.to_representation(choices)  # pyright: ignore[reportOptionalMemberAccess],# ty: ignore[unresolved-attribute]
        else:
            value = BrowserChoicesIntegerPkSerializer(choices, many=True).data
        return value
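# Illustrative note (assumption about the view layer, not verified here):
# get_choices() expects `obj` to be a plain mapping like
#
#   {"field_name": "country", "choices": [<Country instances or raw values>]}
#
# It picks a serializer from _CHOICES_NAME_SERIALIZER_MAP by field name, falls
# back to the matching list field for decade/monochrome/reading_direction/year,
# and otherwise serializes integer-pk named models.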
"""Codex Serializers for the metadata box.""" from rest_framework.fields import CharField from rest_framework.serializers import IntegerField, ListField, Serializer, URLField from codex.serializers.browser.mixins import BrowserAggregateSerializerMixin from codex.serializers.models.comic import ComicSerializer from codex.serializers.models.named import ( CreditSerializer, IdentifierSeralizer, StoryArcNumberSerializer, ) PREFETCH_PREFIX = "attached_" class GroupSerializer(Serializer): """Serialize a group pk and name.""" ids = ListField(child=IntegerField(), read_only=True) name = CharField(read_only=True) number_to = CharField(read_only=True) url = URLField(read_only=True) class MetadataSerializer(BrowserAggregateSerializerMixin, ComicSerializer): """Aggregate stats for the comics selected in the metadata dialog.""" file_name = CharField(read_only=True) parent_folder_id = IntegerField(read_only=True, required=False) series_volume_count = IntegerField(read_only=True) volume_issue_count = IntegerField(read_only=True) publisher_list = GroupSerializer(many=True, required=False) imprint_list = GroupSerializer(many=True, required=False) series_list = GroupSerializer(many=True, required=False) volume_list = GroupSerializer(many=True, required=False) folder_list = GroupSerializer(many=True, required=False) story_arc_list = GroupSerializer(many=True, required=False) publisher = None # pyright: ignore[reportIncompatibleUnannotatedOverride] imprint = None # pyright: ignore[reportIncompatibleUnannotatedOverride] series = None # pyright: ignore[reportIncompatibleUnannotatedOverride] volume = None # pyright: ignore[reportIncompatibleUnannotatedOverride] credits = CreditSerializer( source=f"{PREFETCH_PREFIX}credits", many=True, allow_null=True ) identifiers = IdentifierSeralizer( source=f"{PREFETCH_PREFIX}identifiers", many=True, allow_null=True ) story_arc_numbers = StoryArcNumberSerializer( source=f"{PREFETCH_PREFIX}story_arc_numbers", many=True, allow_null=True ) class Meta(ComicSerializer.Meta): """Configure the model.""" exclude = ( # pyright: ignore[reportIncompatibleUnannotatedOverride] *ComicSerializer.Meta.exclude, "publisher", "imprint", "series", "volume", ) ================================================ FILE: codex/serializers/browser/mixins.py ================================================ """Serializer mixins.""" from datetime import UTC, datetime from itertools import chain from loguru import logger from rest_framework.serializers import ( BooleanField, DecimalField, IntegerField, ListField, SerializerMetaclass, SerializerMethodField, ) from codex.serializers.fields.group import BrowseGroupField from codex.util import max_none from codex.views.const import EPOCH_START class BrowserAggregateSerializerMixin(metaclass=SerializerMetaclass): """Mixin for browser, opds & metadata serializers.""" group = BrowseGroupField(read_only=True) ids = ListField(child=IntegerField(), read_only=True) # Aggregate Annotations child_count = IntegerField(read_only=True) mtime = SerializerMethodField(read_only=True) # Bookmark annotations page = IntegerField(read_only=True) finished = BooleanField(read_only=True) progress = DecimalField( max_digits=5, decimal_places=2, read_only=True, coerce_to_string=False ) @staticmethod def _get_max_updated_at(mtime, updated_ats) -> datetime: """Because orm won't aggregate aggregates.""" for dt_str in updated_ats: if not dt_str: continue try: dt = datetime.strptime(dt_str, "%Y-%m-%d %H:%M:%S.%f").replace( tzinfo=UTC ) except ValueError: logger.warning( f"computing group 
    def get_mtime(self, obj) -> int:
        """Compute mtime from json array aggregates."""
        updated_ats = (
            obj.updated_ats
            if obj.bmua_is_max
            else chain(obj.updated_ats, obj.bookmark_updated_ats)
        )
        mtime = self._get_max_updated_at(EPOCH_START, updated_ats)
        if obj.bmua_is_max:
            mtime: datetime | None = max_none(
                mtime, obj.bookmark_updated_at, EPOCH_START
            )
        if mtime is None:
            return 0
        return int(mtime.timestamp() * 1000)
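# Worked example (arithmetic only): get_mtime() reports milliseconds since the
# epoch, so a newest updated_at of 2024-01-01T00:00:00Z would serialize as
# int(1704067200.0 * 1000) == 1704067200000. The example date is illustrative.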
================================================
FILE: codex/serializers/browser/mtime.py
================================================
"""Group Mtimes."""

from rest_framework.serializers import Serializer

from codex.serializers.browser.settings import BrowserFilterChoicesInputSerializer
from codex.serializers.fields import TimestampField
from codex.serializers.route import SimpleRouteSerializer


class GroupsMtimeSerializer(BrowserFilterChoicesInputSerializer):
    """Groups Mtimes."""

    JSON_FIELDS = frozenset(
        BrowserFilterChoicesInputSerializer.JSON_FIELDS | {"groups"}
    )

    groups = SimpleRouteSerializer(many=True, required=True)


class MtimeSerializer(Serializer):
    """Max mtime for all submitted groups."""

    max_mtime = TimestampField(read_only=True)


================================================
FILE: codex/serializers/browser/page.py
================================================
"""Browser Page Serializer."""

from rest_framework.fields import (
    BooleanField,
    CharField,
    DecimalField,
    IntegerField,
)
from rest_framework.serializers import Serializer

from codex.serializers.browser.mixins import (
    BrowserAggregateSerializerMixin,
)
from codex.serializers.fields import TimestampField
from codex.serializers.fields.browser import BreadcrumbsField
from codex.serializers.fields.group import BrowseGroupField


class BrowserCardSerializer(BrowserAggregateSerializerMixin, Serializer):
    """Browse card displayed in the browser."""

    pk = IntegerField(read_only=True)
    publisher_name = CharField(read_only=True)
    series_name = CharField(read_only=True)
    volume_name = CharField(read_only=True)
    volume_number_to = CharField(read_only=True)
    file_name = CharField(read_only=True)
    name = CharField(read_only=True)
    number_to = CharField(read_only=True)
    issue_number = DecimalField(
        max_digits=16,
        decimal_places=3,
        read_only=True,
        coerce_to_string=False,
    )
    issue_suffix = CharField(read_only=True)
    order_value = CharField(read_only=True)
    page_count = IntegerField(read_only=True)
    reading_direction = CharField(read_only=True)
    has_metadata = BooleanField(read_only=True)


class BrowserAdminFlagsSerializer(Serializer):
    """These choices change with browse context."""

    folder_view = BooleanField(read_only=True)
    import_metadata = BooleanField(read_only=True)


class BrowserTitleSerializer(Serializer):
    """Elements for constructing the browse title."""

    group_name = CharField(read_only=True)
    group_number_to = CharField(read_only=True)
    group_count = IntegerField(read_only=True, allow_null=True)


class BrowserPageSerializer(Serializer):
    """The main browse list."""

    admin_flags = BrowserAdminFlagsSerializer(read_only=True)
    breadcrumbs = BreadcrumbsField(read_only=True)
    title = BrowserTitleSerializer(read_only=True)
    zero_pad = IntegerField(read_only=True)
    libraries_exist = BooleanField(read_only=True)
    model_group = BrowseGroupField(read_only=True)
    num_pages = IntegerField(read_only=True)
    groups = BrowserCardSerializer(allow_empty=True, read_only=True, many=True)
    books = BrowserCardSerializer(allow_empty=True, read_only=True, many=True)
    fts = BooleanField(read_only=True)
    search_error = CharField(read_only=True)
    mtime = TimestampField(read_only=True)


================================================
FILE: codex/serializers/browser/saved.py
================================================
"""Serializers for saved browser settings."""

from rest_framework.fields import BooleanField, CharField, IntegerField
from rest_framework.serializers import ListSerializer, Serializer

from codex.serializers.browser.settings import BrowserSettingsSerializer


class SavedSettingNameSerializer(Serializer):
    """A single saved setting entry."""

    pk = IntegerField(read_only=True)
    name = CharField(read_only=True)


class SavedBrowserSettingsListSerializer(Serializer):
    """List of saved browser settings."""

    saved_settings = SavedSettingNameSerializer(
        many=True,
        read_only=True,
        source="savedSettings",
    )


class SavedBrowserSettingsSaveSerializer(Serializer):
    """Input for saving browser settings with a name."""

    name = CharField(max_length=32, required=True)
    # Output-only fields
    created = BooleanField(read_only=True)


class SavedSettingsLoadSerializer(Serializer):
    """Response for loading a saved setting."""

    settings = BrowserSettingsSerializer(read_only=True)
    filter_warnings = ListSerializer(
        child=CharField(),
        read_only=True,
        source="filterWarnings",
    )


================================================
FILE: codex/serializers/browser/settings.py
================================================
"""Serializers for the browser view."""

from typing import override

from rest_framework.serializers import (
    BooleanField,
    CharField,
    ChoiceField,
    IntegerField,
    Serializer,
)

from codex.choices.browser import BROWSER_ORDER_BY_CHOICES
from codex.serializers.browser.filters import BrowserSettingsFilterInputSerializer
from codex.serializers.fields import TimestampField
from codex.serializers.fields.group import BrowseGroupField, BrowserRouteGroupField
from codex.serializers.mixins import JSONFieldSerializer
from codex.serializers.route import SimpleRouteSerializer
from codex.serializers.settings import SettingsInputSerializer


class BrowserSettingsShowGroupFlagsSerializer(Serializer):
    """Show Group Flags."""

    p = BooleanField()
    i = BooleanField()
    s = BooleanField()
    v = BooleanField()


class BrowserSettingsLastRouteSerializer(Serializer):
    """Last route for browser settings output."""

    group = CharField()
    pks = CharField()
    page = IntegerField()

    @override
    def to_representation(self, instance) -> dict:
        """Handle both dicts and SettingsBrowserLastRoute model instances."""
        if not isinstance(instance, dict):
            # Model instance
            pks = instance.pks
            instance = {
                "group": instance.group,
                "pks": tuple(pks) if pks else (0,),
                "page": instance.page,
            }
        return instance


class BrowserFilterChoicesInputSerializer(JSONFieldSerializer):
    """Browser Settings for the filter choices response."""

    JSON_FIELDS: frozenset[str] = frozenset({"filters"})

    filters = BrowserSettingsFilterInputSerializer(required=False)
    # NOT Sanitized because so complex.
    search = CharField(allow_blank=True, required=False)


class BrowserCoverInputSerializerBase(BrowserFilterChoicesInputSerializer):
    """Base Serializer for Cover and Settings."""

    JSON_FIELDS = frozenset(BrowserFilterChoicesInputSerializer.JSON_FIELDS | {"show"})

    custom_covers = BooleanField(required=False)
    dynamic_covers = BooleanField(required=False)
    order_by = ChoiceField(
        choices=tuple(BROWSER_ORDER_BY_CHOICES.keys()), required=False
    )
    order_reverse = BooleanField(required=False)
    show = BrowserSettingsShowGroupFlagsSerializer(required=False)


class BrowserCoverInputSerializer(BrowserCoverInputSerializerBase):
    """Browser Settings for the cover response."""

    JSON_FIELDS = frozenset(
        BrowserCoverInputSerializerBase.JSON_FIELDS | {"parent_route"}
    )

    parent_route = SimpleRouteSerializer(required=False)


class BrowserSettingsSerializerBase(BrowserCoverInputSerializerBase):
    """Base Serializer for Browser & OPDS Settings."""

    top_group = BrowseGroupField(required=False)

    @override
    def to_internal_value(self, data) -> dict:
        if "search" not in data and (search := data.get("query", data.get("q"))):
            # Accept "query" or "q" as an alias for "search".
            data = data.copy()
            data["search"] = search
        return super().to_internal_value(data)
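# Illustrative note: to_internal_value() above lets OPDS clients pass
# ?query=batman or ?q=batman instead of ?search=batman; the value is copied
# into "search" before normal validation runs. "batman" is an arbitrary
# example query.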
class OPDSSettingsSerializer(BrowserSettingsSerializerBase):
    """Browser Settings for the OPDS."""

    limit = IntegerField(required=False)
    opds_metadata = BooleanField(required=False)
    query = CharField(allow_blank=True, required=False)  # OPDS 2.0


class BrowserSettingsSerializer(BrowserSettingsSerializerBase):
    """
    Browser Settings that the user can change.

    This is the only browse serializer that's submitted.
    """

    mtime = TimestampField(read_only=True)
    twenty_four_hour_time = BooleanField(required=False)
    always_show_filename = BooleanField(required=False)


class BrowserSettingsInputSerializer(SettingsInputSerializer):
    """Browser Set Settings Input Serializer."""

    group = BrowserRouteGroupField(required=False)


================================================
FILE: codex/serializers/fields/__init__.py
================================================
"""Custom Serializer Fields."""

from codex.serializers.fields.auth import TimestampField, TimezoneField
from codex.serializers.fields.group import BrowseGroupField
from codex.serializers.fields.reader import FitToField, ReadingDirectionField
from codex.serializers.fields.sanitized import SanitizedCharField
from codex.serializers.fields.settings import SettingsKeyField
from codex.serializers.fields.stats import (
    CountDictField,
    SerializerChoicesField,
    StringListMultipleChoiceField,
)
from codex.serializers.fields.vuetify import (
    VuetifyBooleanField,
    VuetifyCharField,
    VuetifyDecadeField,
    VuetifyDecimalField,
    VuetifyIntegerField,
)

__all__ = (
    "BrowseGroupField",
    "CountDictField",
    "FitToField",
    "ReadingDirectionField",
    "SanitizedCharField",
    "SerializerChoicesField",
    "SettingsKeyField",
    "StringListMultipleChoiceField",
    "TimestampField",
    "TimezoneField",
    "VuetifyBooleanField",
    "VuetifyCharField",
    "VuetifyDecadeField",
    "VuetifyDecimalField",
    "VuetifyIntegerField",
)


================================================
FILE: codex/serializers/fields/auth.py
================================================
"""Custom fields."""

from datetime import UTC, datetime
from typing import override
from zoneinfo import ZoneInfo, ZoneInfoNotFoundError

from rest_framework.exceptions import ValidationError
from rest_framework.serializers import (
    CharField,
    IntegerField,
)


class TimestampField(IntegerField):
    """IntegerTimestampField."""

    @override
    def to_representation(self, value) -> int:
        """Convert to JavaScript millisecond int timestamp from datetime, or castable."""
        if isinstance(value, datetime):
            value = value.timestamp()
        return int(float(value) * 1000)

    @override
    def to_internal_value(self, data) -> datetime:  # pyright: ignore[reportIncompatibleMethodOverride], # ty: ignore[invalid-method-override]
        """Convert from castable, likely string to datetime."""
        return datetime.fromtimestamp(float(data) / 1000, tz=UTC)


def validate_timezone(data):
    """Validate Timezone."""
    try:
        ZoneInfo(data)
    except ZoneInfoNotFoundError as exc:
        raise ValidationError from exc
    return data


class TimezoneField(CharField):
    """Timezone field."""

    def __init__(self, *args, **kwargs) -> None:
        """Call CharField with defaults."""
        super().__init__(*args, min_length=2, validators=[validate_timezone], **kwargs)
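# Worked example (illustrative): TimestampField round-trips JavaScript-style
# millisecond timestamps, e.g.
#
#   TimestampField().to_representation(datetime(2024, 1, 1, tzinfo=UTC))
#   # -> 1704067200000
#   TimestampField().to_internal_value(1704067200000)
#   # -> datetime(2024, 1, 1, 0, 0, tzinfo=UTC)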
"""Valid Top Groups Only.""" class_choices = tuple(BROWSER_ROUTE_CHOICES.keys()) ================================================ FILE: codex/serializers/fields/reader.py ================================================ """Reader Fields.""" from codex.models.choices import ReadingDirectionChoices from codex.models.settings import FitToChoices from codex.serializers.fields.base import CodexChoiceField from codex.views.const import FOLDER_GROUP, STORY_ARC_GROUP VALID_ARC_GROUPS = ("s", "v", FOLDER_GROUP, STORY_ARC_GROUP) class FitToField(CodexChoiceField): """Reader FitTo Field.""" class_choices = FitToChoices.values class ReadingDirectionField(CodexChoiceField): """Reading Direction Field.""" class_choices = ReadingDirectionChoices.values class ArcGroupField(CodexChoiceField): """Arc Group Field.""" class_choices = VALID_ARC_GROUPS ================================================ FILE: codex/serializers/fields/sanitized.py ================================================ """Sanitied Fields.""" from typing import override from nh3 import clean from rest_framework.fields import CharField class SanitizedCharField(CharField): """Sanitize CharField using NH3.""" @override def to_internal_value(self, data) -> str: """Sanitize CharField using NH3.""" sanitized_data = clean(data) return super().to_internal_value(sanitized_data) ================================================ FILE: codex/serializers/fields/settings.py ================================================ """Session Custom Fields.""" from codex.choices.browser import BROWSER_DEFAULTS from codex.choices.reader import READER_DEFAULTS from codex.serializers.fields.base import CodexChoiceField class SettingsKeyField(CodexChoiceField): """Seettings Field.""" class_choices = (*READER_DEFAULTS.keys(), *BROWSER_DEFAULTS.keys()) ================================================ FILE: codex/serializers/fields/stats.py ================================================ """Custom Vuetify fields.""" from typing import override from rest_framework.fields import DictField, IntegerField from rest_framework.serializers import MultipleChoiceField class StringListMultipleChoiceField(MultipleChoiceField): """A Multiple Choice Field expressed as as a comma delimited string.""" @override def to_internal_value(self, data) -> str: """Convert comma delimited strings to sets.""" if isinstance(data, str): data = frozenset(data.split(",")) return super().to_internal_value(data) # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type] class SerializerChoicesField(StringListMultipleChoiceField): """A String List Multiple Choice Field limited to a specified serializer's fields.""" def __init__(self, serializer=None, **kwargs) -> None: """Limit choices to fields from serializers.""" if not serializer: reason = "serializer required for this field." 
================================================
FILE: codex/serializers/fields/settings.py
================================================
"""Session Custom Fields."""

from codex.choices.browser import BROWSER_DEFAULTS
from codex.choices.reader import READER_DEFAULTS
from codex.serializers.fields.base import CodexChoiceField


class SettingsKeyField(CodexChoiceField):
    """Settings Field."""

    class_choices = (*READER_DEFAULTS.keys(), *BROWSER_DEFAULTS.keys())


================================================
FILE: codex/serializers/fields/stats.py
================================================
"""Custom Vuetify fields."""

from typing import override

from rest_framework.fields import DictField, IntegerField
from rest_framework.serializers import MultipleChoiceField


class StringListMultipleChoiceField(MultipleChoiceField):
    """A Multiple Choice Field expressed as a comma delimited string."""

    @override
    def to_internal_value(self, data) -> set:
        """Convert comma delimited strings to sets."""
        if isinstance(data, str):
            data = frozenset(data.split(","))
        return super().to_internal_value(data)  # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type]


class SerializerChoicesField(StringListMultipleChoiceField):
    """A String List Multiple Choice Field limited to a specified serializer's fields."""

    def __init__(self, serializer=None, **kwargs) -> None:
        """Limit choices to fields from serializers."""
        if not serializer:
            reason = "serializer required for this field."
            raise ValueError(reason)
        choices = serializer().get_fields().keys()
        super().__init__(choices=choices, **kwargs)


class CountDictField(DictField):
    """Dict for counting things."""

    child = IntegerField(read_only=True)


================================================
FILE: codex/serializers/fields/vuetify.py
================================================
"""Custom Vuetify fields."""

import inspect
from typing import override

from django.utils.translation import gettext_lazy as _
from rest_framework.exceptions import ValidationError
from rest_framework.fields import Field, ListField
from rest_framework.serializers import (
    BooleanField,
    CharField,
    DecimalField,
    IntegerField,
)

from codex.choices.browser import VUETIFY_NULL_CODE
from codex.models.choices import FileTypeChoices, ReadingDirectionChoices
from codex.serializers.fields.base import CodexChoiceField


class VuetifyNullCodeFieldMixin:
    """Convert Vuetify null codes to None."""

    NULL_CODE: int = VUETIFY_NULL_CODE

    def to_internal_value(self, data):
        """Convert numeric null code to None."""
        data = super().to_internal_value(data)  # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute]
        return None if data == self.NULL_CODE else data

    def to_representation(self, data):
        """Convert None to numeric null code."""
        data = self.NULL_CODE if data is None else data
        return super().to_representation(data)  # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute]
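# Worked example: per the validate_decade comment further down in this file,
# the Vuetify front end encodes null as the -1 sentinel (VUETIFY_NULL_CODE),
# so the mixin above maps -1 -> None on input and None -> -1 on output. For
# VuetifyCharField, defined below, the sentinel is the same code as a string.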
class VuetifyFileTypeChoiceField(VuetifyNullCodeFieldMixin, CodexChoiceField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """File Type Choice Field."""

    class_choices = FileTypeChoices.values


class VuetifyReadingDirectionChoiceField(VuetifyNullCodeFieldMixin, CodexChoiceField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """Reading Direction Choice Field."""

    class_choices = ReadingDirectionChoices.values


class VuetifyDecimalField(VuetifyNullCodeFieldMixin, DecimalField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """Float Field with null code conversion."""


class VuetifyIntegerField(VuetifyNullCodeFieldMixin, IntegerField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """Integer Field with null code conversion."""


class VuetifyCharField(VuetifyNullCodeFieldMixin, CharField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """Char Field with null code conversion."""

    NULL_CODE: str = str(VUETIFY_NULL_CODE)  # pyright: ignore[reportIncompatibleVariableOverride]


class VuetifyBooleanField(VuetifyNullCodeFieldMixin, BooleanField):  # pyright: ignore[reportIncompatibleMethodOverride]
    """Boolean Field with null code conversion."""


def validate_decade(decade) -> bool:
    """Validate decades."""
    # * We don't need a whole db call just to be perfectly accurate
    # * -1s are decoded back into None before validation
    if decade is not None and decade % 10 != 0:
        raise ValidationError(_("Invalid decade: ") + f"{decade}")
    return True


class VuetifyDecadeField(VuetifyIntegerField):
    """Integer field with null code conversion and decade validation."""

    VALIDATORS = (validate_decade,)

    def __init__(self, *args, **kwargs) -> None:
        """Use decade validator."""
        super().__init__(*args, validators=self.VALIDATORS, **kwargs)


class VuetifyListField(ListField):
    """List with a default child and required args."""

    CHILD_CLASS: type[Field] = VuetifyIntegerField
    READ_ONLY: bool = False

    def __init__(
        self, *args, child: type[Field] | Field | None = None, required=False, **kwargs
    ):
        """List with a default child and required."""
        child_instance: Field
        if not child:
            child_instance = self.CHILD_CLASS()
        elif inspect.isclass(child):
            child_instance = child()
        else:
            child_instance = child
        kwargs.setdefault("read_only", self.READ_ONLY)
        super().__init__(*args, child=child_instance, required=required, **kwargs)

    @override
    def to_representation(self, value: list) -> list:
        """
        List of object instances -> List of dicts of primitive datatypes.

        Remove superclass's None filter.
        """
        return [self.child.to_representation(item) for item in value]


class VuetifyReadOnlyListField(VuetifyListField):
    """Vuetify Read Only List Field."""

    READ_ONLY = True


================================================
FILE: codex/serializers/homepage.py
================================================
"""Serializers for homepage endpoint."""

from rest_framework.serializers import IntegerField, Serializer


class HomepageSerializer(Serializer):
    """Minimal stats for homepage."""

    publisher_count = IntegerField()
    series_count = IntegerField()
    comic_count = IntegerField()


================================================
FILE: codex/serializers/mixins.py
================================================
"""Serializer mixins."""

import json
from contextlib import suppress
from json.decoder import JSONDecodeError
from typing import Any, override
from urllib.parse import unquote_plus

from djangorestframework_camel_case.settings import api_settings
from djangorestframework_camel_case.util import underscoreize
from loguru import logger
from rest_framework.serializers import (
    BooleanField,
    Serializer,
)


class OKSerializer(Serializer):
    """Default serializer for views without much response."""

    ok = BooleanField(default=True)


class JSONFieldSerializer(Serializer):
    """Reparse JSON encoded query_params."""

    JSON_FIELDS: frozenset[str] = frozenset()

    @staticmethod
    def _parse_json_field(key, value) -> Any:
        """Unquote and parse one JSON encoded query param."""
        try:
            parsed_value = unquote_plus(value)
            with suppress(JSONDecodeError):
                parsed_value = json.loads(parsed_value)
        except Exception:
            reason = f"parsing as json: {key}:{value}"
            logger.exception(reason)
            parsed_value = None
        return parsed_value

    @override
    def to_internal_value(self, data) -> dict:
        """Reparse JSON encoded query_params."""
        # It is an unbelievable amount of trouble to try to parse axios native bracket
        # encoded complex objects in python
        parsed_dict: dict[str, Any] = {
            key: self._parse_json_field(key, value) if key in self.JSON_FIELDS else value
            for key, value in data.items()
        }
        data = dict(underscoreize(parsed_dict, **api_settings.JSON_UNDERSCOREIZE))  # pyright: ignore[reportArgumentType,reportCallIssue]
        return super().to_internal_value(data)
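# Worked example (hypothetical query param): JSON embedded in a query string
# such as
#
#   ?filters=%7B%22decade%22%3A%20%5B1990%5D%7D
#
# is unquoted to '{"decade": [1990]}' and json.loads-ed by _parse_json_field,
# because "filters" is listed in JSON_FIELDS by the browser serializers.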
"""BaseModel Serializer for inheritance.""" class Meta(SerializerMetaclass): # pyright: ignore[reportIncompatibleVariableOverride] """Use explicit metaclass instead of python 3 method.""" abstract = True ================================================ FILE: codex/serializers/models/bookmark.py ================================================ """Bookmark Model Serializers.""" from codex.models.bookmark import Bookmark from codex.serializers.models.base import BaseModelSerializer class BookmarkSerializer(BaseModelSerializer): """Serializer Bookmark.""" class Meta(BaseModelSerializer.Meta): """Configure the model.""" model = Bookmark fields = ( "finished", "page", ) class BookmarkFinishedSerializer(BaseModelSerializer): """The finished field of the Bookmark.""" class Meta(BaseModelSerializer.Meta): """Model spec.""" model = Bookmark fields = ("finished",) ================================================ FILE: codex/serializers/models/comic.py ================================================ """Serializers for codex models.""" from rest_framework.serializers import IntegerField from codex.models import ( Comic, ) from codex.serializers.models.base import BaseModelSerializer from codex.serializers.models.groups import ( ImprintSerializer, PublisherSerializer, SeriesSerializer, VolumeSerializer, ) from codex.serializers.models.named import ( AgeRatingSerializer, CharacterSerializer, CreditSerializer, GenreSerializer, IdentifierSeralizer, LocationSerializer, OriginalFormatSerializer, ScanInfoSerializer, SeriesGroupSerializer, StoryArcNumberSerializer, StorySerializer, TaggerSerializer, TagSerializer, TeamSerializer, UniverseSerializer, ) from codex.serializers.models.pycountry import CountrySerializer, LanguageSerializer class ComicSerializer(BaseModelSerializer): """Serialize a comic object for the metadata dialog.""" # Easier than specifying fields in Meta pk = IntegerField(source="id") # Annotations # issue_count = IntegerField(allow_null=True) could be 1 # volume_count = IntegerField(allow_null=True) could be 1 # Group FKs publisher = PublisherSerializer(allow_null=True) imprint = ImprintSerializer(allow_null=True) series = SeriesSerializer(allow_null=True) volume = VolumeSerializer(allow_null=True) # Special Serialization with pycountry country = CountrySerializer(allow_null=True) language = LanguageSerializer(allow_null=True) # Other FKS age_rating = AgeRatingSerializer(allow_null=True) original_format = OriginalFormatSerializer(allow_null=True) scan_info = ScanInfoSerializer(allow_null=True) tagger = TaggerSerializer(allow_null=True) main_character = CharacterSerializer(allow_null=True) main_team = TeamSerializer(allow_null=True) # ManyToMany characters = CharacterSerializer(many=True, allow_null=True) credits = CreditSerializer(many=True, allow_null=True) genres = GenreSerializer(many=True, allow_null=True) identifiers = IdentifierSeralizer(many=True, allow_null=True) locations = LocationSerializer(many=True, allow_null=True) series_groups = SeriesGroupSerializer(many=True, allow_null=True) stories = StorySerializer(many=True, allow_null=True) story_arc_numbers = StoryArcNumberSerializer( many=True, allow_null=True, ) tags = TagSerializer(many=True, allow_null=True) teams = TeamSerializer(many=True, allow_null=True) universes = UniverseSerializer(many=True, allow_null=True) class Meta(BaseModelSerializer.Meta): """Configure the model.""" model = Comic exclude = ("folders", "parent_folder", "stat") depth = 1 ================================================ FILE: 
================================================
FILE: codex/serializers/models/groups.py
================================================
"""Browser Group Model Serializers."""

from codex.models import (
    Imprint,
    Publisher,
    Series,
    Volume,
)
from codex.serializers.models.named import NamedModelSerializer


class GroupModelSerializer(NamedModelSerializer):
    """A common class for BrowserGroupModels."""

    class Meta(NamedModelSerializer.Meta):
        """Abstract class."""

        abstract = True


class PublisherSerializer(GroupModelSerializer):
    """Publisher Model."""

    class Meta(GroupModelSerializer.Meta):
        """Configure model."""

        model = Publisher


class ImprintSerializer(GroupModelSerializer):
    """Imprint Model."""

    class Meta(GroupModelSerializer.Meta):
        """Configure model."""

        model = Imprint


class SeriesSerializer(GroupModelSerializer):
    """Series Model."""

    class Meta(GroupModelSerializer.Meta):
        """Configure model."""

        model = Series


class VolumeSerializer(GroupModelSerializer):
    """Volume Model."""

    class Meta(GroupModelSerializer.Meta):
        """Configure model."""

        model = Volume


================================================
FILE: codex/serializers/models/named.py
================================================
"""Named model Serializers."""

from rest_framework.fields import CharField
from rest_framework.serializers import URLField

from codex.models import (
    AgeRating,
    Character,
    Credit,
    CreditPerson,
    CreditRole,
    Genre,
    Location,
    OriginalFormat,
    ScanInfo,
    SeriesGroup,
    Story,
    StoryArc,
    StoryArcNumber,
    Tag,
    Tagger,
    Team,
)
from codex.models.identifier import Identifier, IdentifierSource
from codex.models.named import Universe
from codex.serializers.models.base import BaseModelSerializer


class NamedModelSerializer(BaseModelSerializer):
    """A common class for NamedModels."""

    class Meta(BaseModelSerializer.Meta):
        """Abstract."""

        fields: tuple[str, ...] = ("pk", "name")
        abstract = True


class URLNamedModelSerializer(NamedModelSerializer):
    """A common class for NamedModels with URLs."""

    url = URLField(read_only=True, source="identifier.url")

    class Meta(NamedModelSerializer.Meta):
        """Abstract."""

        fields: tuple[str, ...] = ("pk", "name", "url")
        abstract = True
= ("pk", "name", "url") abstract = True class CreditPersonSerializer(URLNamedModelSerializer): """CreditPerson model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = CreditPerson class CreditRoleSerializer(URLNamedModelSerializer): """CreditRole model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = CreditRole class CreditSerializer(BaseModelSerializer): """Credit model serializer.""" role = CreditRoleSerializer() person = CreditPersonSerializer() class Meta(BaseModelSerializer.Meta): """Model spec.""" model = Credit fields = ("pk", "person", "role") depth = 1 class CharacterSerializer(URLNamedModelSerializer): """Character model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Character class GenreSerializer(URLNamedModelSerializer): """Genre model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Genre class IdentifierSourceSerializer(NamedModelSerializer): """IdentifierSource model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = IdentifierSource class IdentifierSeralizer(BaseModelSerializer): """Identifier model.""" name = CharField(read_only=True) class Meta(BaseModelSerializer.Meta): """Configure model.""" model = Identifier fields = ("pk", "name", "key", "url") depth = 1 class LocationSerializer(URLNamedModelSerializer): """Location model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Location class SeriesGroupSerializer(NamedModelSerializer): """SeriesGroup model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = SeriesGroup class StorySerializer(URLNamedModelSerializer): """StoryArc model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Story class StoryArcSerializer(URLNamedModelSerializer): """StoryArc model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = StoryArc class StoryArcNumberSerializer(BaseModelSerializer): """StoryArc model.""" name = CharField(read_only=True) url = URLField(read_only=True, source="story_arc.identifier.url") class Meta(BaseModelSerializer.Meta): """Configure model.""" model = StoryArcNumber fields = ("pk", "name", "number", "url") depth = 1 class TaggerSerializer(NamedModelSerializer): """Tag model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = Tagger class AgeRatingSerializer(NamedModelSerializer): """Age Rating model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = AgeRating class TagSerializer(URLNamedModelSerializer): """Tag model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Tag class OriginalFormatSerializer(NamedModelSerializer): """Original Format model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = OriginalFormat class ScanInfoSerializer(NamedModelSerializer): """Scan Info model.""" class Meta(NamedModelSerializer.Meta): """Configure model.""" model = ScanInfo class TeamSerializer(URLNamedModelSerializer): """Team model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Team class UniverseSerializer(URLNamedModelSerializer): """Team model.""" class Meta(URLNamedModelSerializer.Meta): """Configure model.""" model = Universe fields: tuple[str, ...] 
= ("pk", "name", "designation", "url") ================================================ FILE: codex/serializers/models/pycountry.py ================================================ """Pycountry Model Serializers.""" from codex.models import ( Country, Language, ) from codex.serializers.fields.browser import CountryField, LanguageField from codex.serializers.models.named import NamedModelSerializer class CountrySerializer(NamedModelSerializer): """Pycountry serializer for country field.""" name = CountryField(read_only=True) class Meta(NamedModelSerializer.Meta): """Configure model.""" model = Country class LanguageSerializer(NamedModelSerializer): """Pycountry serializer for language field.""" name = LanguageField(read_only=True) class Meta(NamedModelSerializer.Meta): """Configure model.""" model = Language ================================================ FILE: codex/serializers/opds/__init__.py ================================================ """OPDS Serializers.""" ================================================ FILE: codex/serializers/opds/authentication.py ================================================ """ OPDS Authentication 1.0 Serializer. https://drafts.opds.io/schema/authentication.schema.json """ from rest_framework.fields import CharField from rest_framework.serializers import IntegerField, Serializer class OPDSAuth1LinksSerializer(Serializer): """OPDS Authentication Links.""" rel = CharField(read_only=True) href = CharField(read_only=True) type = CharField(read_only=True) width = IntegerField(read_only=True, required=False) height = IntegerField(read_only=True, required=False) class OPDSAuthetication1LabelsSerializer(Serializer): """Authentication Labels.""" login = CharField(read_only=True) password = CharField(read_only=True) class OPDSAuthentication1FlowSerializer(Serializer): """Authentication Flow.""" type = CharField(read_only=True) links = OPDSAuth1LinksSerializer(many=True, read_only=True, required=False) labels = OPDSAuthetication1LabelsSerializer(read_only=True) class OPDSAuthentication1Serializer(Serializer): """Authentication.""" title = CharField(read_only=True) id = CharField(read_only=True) description = CharField(required=False, read_only=True) links = OPDSAuth1LinksSerializer(many=True, read_only=True, required=False) authentication = OPDSAuthentication1FlowSerializer(many=True, read_only=True) ================================================ FILE: codex/serializers/opds/urls.py ================================================ """OPDS URLs.""" from rest_framework.fields import CharField from rest_framework.serializers import Serializer class OPDSURLsSerializer(Serializer): """OPDS Urls.""" v1 = CharField(read_only=True) v2 = CharField(read_only=True) ================================================ FILE: codex/serializers/opds/v1.py ================================================ """Serializers for the browser view.""" from zoneinfo import ZoneInfo from rest_framework.fields import ( BooleanField, CharField, DateField, DateTimeField, DictField, IntegerField, ) from rest_framework.serializers import Serializer from codex.serializers.fields.group import BrowseGroupField from codex.serializers.models.pycountry import LanguageSerializer UTC_TZ = ZoneInfo("UTC") class OPDS1TemplateLinkSerializer(Serializer): """OPDS Link Template Serializer.""" href = CharField(read_only=True) rel = CharField(read_only=True) mime_type = CharField(read_only=True) title = CharField(read_only=True, required=False) length = IntegerField(read_only=True, required=False) facet_group = 
================================================
FILE: codex/serializers/opds/urls.py
================================================
"""OPDS URLs."""

from rest_framework.fields import CharField
from rest_framework.serializers import Serializer


class OPDSURLsSerializer(Serializer):
    """OPDS Urls."""

    v1 = CharField(read_only=True)
    v2 = CharField(read_only=True)


================================================
FILE: codex/serializers/opds/v1.py
================================================
"""Serializers for the browser view."""

from zoneinfo import ZoneInfo

from rest_framework.fields import (
    BooleanField,
    CharField,
    DateField,
    DateTimeField,
    DictField,
    IntegerField,
)
from rest_framework.serializers import Serializer

from codex.serializers.fields.group import BrowseGroupField
from codex.serializers.models.pycountry import LanguageSerializer

UTC_TZ = ZoneInfo("UTC")


class OPDS1TemplateLinkSerializer(Serializer):
    """OPDS Link Template Serializer."""

    href = CharField(read_only=True)
    rel = CharField(read_only=True)
    mime_type = CharField(read_only=True)
    title = CharField(read_only=True, required=False)
    length = IntegerField(read_only=True, required=False)
    facet_group = BrowseGroupField(read_only=True, required=False)
    facet_active = BooleanField(read_only=True, required=False)
    thr_count = IntegerField(read_only=True, required=False)
    pse_count = IntegerField(read_only=True, required=False)
    pse_last_read = IntegerField(read_only=True, required=False)
    pse_last_read_date = DateTimeField(read_only=True, required=False)


class OPDS1CreditSerializer(Serializer):
    """OPDS 1 Credit."""

    name = CharField(read_only=True)
    url = CharField(read_only=True, required=False)


class OPDS1TemplateEntrySerializer(Serializer):
    """OPDS Entry Template Serializer."""

    id_tag = CharField(read_only=True)
    title = CharField(read_only=True)
    links = OPDS1TemplateLinkSerializer(many=True, read_only=True)
    issued = DateField(read_only=True, required=False)
    updated = DateTimeField(read_only=True, required=False, default_timezone=UTC_TZ)
    published = DateTimeField(read_only=True, required=False, default_timezone=UTC_TZ)
    publisher = CharField(read_only=True, required=False)
    language = LanguageSerializer(read_only=True, required=False)
    summary = CharField(read_only=True, required=False)
    authors = OPDS1CreditSerializer(many=True, required=False, read_only=True)
    credits = OPDS1CreditSerializer(many=True, required=False, read_only=True)
    category_groups = DictField(required=False, read_only=True)


class OPDS1TemplateSerializer(Serializer):
    """OPDS Browser Template Serializer."""

    opds_ns = CharField(read_only=True)
    is_acquisition = BooleanField(read_only=True)
    id_tag = CharField(read_only=True)
    title = CharField(read_only=True)
    updated = DateTimeField(read_only=True, default_timezone=UTC_TZ)
    links = OPDS1TemplateLinkSerializer(many=True, read_only=True)
    entries = OPDS1TemplateEntrySerializer(many=True, read_only=True)
    items_per_page = IntegerField(read_only=True)
    total_results = IntegerField(read_only=True)
    version = CharField(read_only=True)


================================================
FILE: codex/serializers/opds/v2/__init__.py
================================================
"""OPDS v2.0 Serializers."""


================================================
FILE: codex/serializers/opds/v2/facet.py
================================================
"""OPDS v2.0 Facet Serializer."""

from rest_framework.serializers import Serializer

from codex.serializers.opds.v2.links import OPDS2LinkListField
from codex.serializers.opds.v2.metadata import OPDS2MetadataSerializer


class OPDS2FacetSerializer(Serializer):
    """
    Facets.

    https://drafts.opds.io/opds-2.0.html#24-facets
    """

    metadata = OPDS2MetadataSerializer(read_only=True)
    links = OPDS2LinkListField(read_only=True)
================================================
FILE: codex/serializers/opds/v2/feed.py
================================================
"""OPDS v2.0 Feed Serializer."""

from rest_framework.fields import (
    ListField,
)
from rest_framework.serializers import Serializer

from codex.serializers.opds.v2.facet import OPDS2FacetSerializer
from codex.serializers.opds.v2.links import OPDS2LinkListField
from codex.serializers.opds.v2.metadata import OPDS2MetadataSerializer
from codex.serializers.opds.v2.publication import OPDS2PublicationSerializer


class OPDS2GroupSerializer(Serializer):
    """
    Group.

    https://drafts.opds.io/opds-2.0.html#25-groups
    """

    metadata = OPDS2MetadataSerializer(read_only=True)
    links = OPDS2LinkListField(read_only=True, required=False)
    publications = ListField(
        child=OPDS2PublicationSerializer(), read_only=True, required=False
    )
    navigation = OPDS2LinkListField(read_only=True, required=False)


class OPDS2FeedSerializer(OPDS2GroupSerializer):
    """
    Feed.

    https://drafts.opds.io/schema/feed.schema.json
    https://readium.org/webpub-manifest/schema/subcollection.schema.json
    """

    facets = ListField(child=OPDS2FacetSerializer(), read_only=True, required=False)
    groups = ListField(child=OPDS2GroupSerializer(), read_only=True, required=False)


================================================
FILE: codex/serializers/opds/v2/links.py
================================================
"""OPDS v2.0 Links Serializers."""

from typing import override

from rest_framework.fields import (
    BooleanField,
    CharField,
    IntegerField,
    ListField,
    SerializerMethodField,
)
from rest_framework.serializers import Serializer


class OPDS2LinkBaseSerializer(Serializer):
    """Minimal Link Serializer for Authenticate."""

    href = CharField(read_only=True)
    rel = SerializerMethodField(read_only=True, required=False)
    type = CharField(read_only=True, required=False)

    def get_rel(self, obj) -> str | list[str]:
        """Allow for SanitizedCharField or CharListField types."""
        rel: str | list[str] = obj.get("rel", "")
        if rel and not isinstance(rel, list | str):
            reason = "OPDS2LinkSerializer.rel is not a list or a string."
            raise TypeError(reason)
        return rel

    @override
    def to_representation(self, instance) -> dict:
        """Clean complex rel field if None."""
        ret = super().to_representation(instance)
        if "rel" in ret and not ret["rel"]:
            del ret["rel"]
        return ret


class OPSD2AuthenticateSerializer(OPDS2LinkBaseSerializer):
    """
    Authenticate Serializer.

    Discussion proto-spec https://github.com/opds-community/drafts/discussions/43
    """


class OPDS2LinkPropertiesSerializer(Serializer):
    """
    Link Properties.

    https://drafts.opds.io/schema/properties.schema.json
    """

    number_of_items = IntegerField(read_only=True, required=False)
    # price = OPDS2PriceSerializer(read_only=True, required=False) unused
    # indirect_acquisition = OPDS2AcquisitionObjectSerializer( unused
    #     read_only=True, many=True, required=False unused
    # ) unused
    # holds = OPDS2HoldsSerializer(read_only=True, required=False) unused
    # copies = OPDS2CopiesSerializer(read_only=True, required=False) unused
    # availability = OPDS2AvailabilitySerializer(read_only=True, required=False) unused
    # Discussion proto-spec https://github.com/opds-community/drafts/discussions/43
    authenticate = OPSD2AuthenticateSerializer(required=False)
    # Divina Only
    # X page = CharField(read_only=True, required=False)
class OPDS2LinkSerializer(OPDS2LinkBaseSerializer):
    """
    Link.

    https://readium.org/webpub-manifest/schema/link.schema.json
    """

    title = CharField(read_only=True, required=False)
    templated = BooleanField(read_only=True, required=False)
    properties = OPDS2LinkPropertiesSerializer(read_only=True, required=False)
    # Images
    height = IntegerField(read_only=True, required=False)
    width = IntegerField(read_only=True, required=False)
    size = IntegerField(read_only=True, required=False)
    # Uncommon
    # bitrate = IntegerField(read_only=True, required=False) unused
    # duration = IntegerField(read_only=True, required=False) unused
    # language = CharField(many=True, read_only=True, required=False) unused
    # X alternate = ListField(
    # X     child=CharField(read_only=True), read_only=True, required=False
    # X )
    # X children = OPDS2LinkListField(read_only=True, required=False
    # X )


class OPDS2LinkListField(ListField):
    """Link List."""

    child = OPDS2LinkSerializer(read_only=True)
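# Illustrative note: get_rel() in OPDS2LinkBaseSerializer tolerates either a
# single rel string or a list of rels, so both {"rel": "self"} and
# {"rel": ["first", "last"]} serialize cleanly, and to_representation() drops
# an empty rel entirely. The example rel values are hypothetical.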
================================================
FILE: codex/serializers/opds/v2/metadata.py
================================================
"""OPDS 2.0 Metadata Serializer."""

from rest_framework.fields import CharField, DateTimeField, IntegerField
from rest_framework.serializers import Serializer


class OPDS2MetadataSerializer(Serializer):
    """
    Metadata.

    https://drafts.opds.io/schema/feed-metadata.schema.json
    """

    title = CharField(read_only=True)
    identifier = CharField(read_only=True, required=False)
    subtitle = CharField(read_only=True, required=False)
    modified = DateTimeField(read_only=True, required=False)
    description = CharField(read_only=True, required=False)
    items_per_page = IntegerField(read_only=True, required=False)
    current_page = IntegerField(read_only=True, required=False)
    number_of_items = IntegerField(read_only=True, required=False)


================================================
FILE: codex/serializers/opds/v2/progression.py
================================================
"""
OPDS v2 Progression Serializer.

https://github.com/opds-community/drafts/discussions/67
"""

from rest_framework.fields import (
    CharField,
    DateTimeField,
    FloatField,
    IntegerField,
    ListField,
)
from rest_framework.serializers import Serializer


class OPDS2ProgressionDeviceSerializer(Serializer):
    """Progression Device Element."""

    id = CharField(read_only=True)
    name = CharField(read_only=True)


class OPDS2ProgressionLocationsSerializer(Serializer):
    """Progression Location Element."""

    fragments = ListField(child=CharField(read_only=True), read_only=True)
    position = IntegerField()
    progression = FloatField(read_only=True)
    total_progression = FloatField(read_only=True)


class OPDS2ProgressionLocatorSerializer(Serializer):
    """Progression Locator Element."""

    title = CharField(read_only=True)
    href = CharField(read_only=True)
    type = CharField(read_only=True)
    locations = OPDS2ProgressionLocationsSerializer()


class OPDS2ProgressionSerializer(Serializer):
    """
    Progression.

    https://github.com/opds-community/drafts/discussions/67#discussioncomment-6414507
    """

    modified = DateTimeField(read_only=True)
    device = OPDS2ProgressionDeviceSerializer()
    locator = OPDS2ProgressionLocatorSerializer()


================================================
FILE: codex/serializers/opds/v2/publication.py
================================================
"""OPDS 2 Publication Serializers."""

from rest_framework.fields import CharField, DateField, IntegerField, ListField
from rest_framework.serializers import Serializer

from codex.serializers.opds.v2.facet import OPDS2FacetSerializer
from codex.serializers.opds.v2.links import OPDS2LinkListField
from codex.serializers.opds.v2.metadata import OPDS2MetadataSerializer


class OPDS2SubjectSerializer(Serializer):
    """
    Subject Object.

    Can be nearly any tag type.
    https://readium.org/webpub-manifest/schema/subject.schema.json
    """

    name = CharField(read_only=True)
    # X code = CharField(read_only=True, required=True)
    # X scheme = URIField(read_only=True, required=False)
    links = OPDS2LinkListField(read_only=True, required=False)
    # X sort_as = CharField(read_only=True, required=False) language-map.schema.json


class OPDS2ContributorSerializer(OPDS2SubjectSerializer):
    """
    Credit Object.

    https://readium.org/webpub-manifest/schema/contributor.schema.json
    """

    identifier = CharField(read_only=True, required=False)
    # X alt_identifier = CharField(read_only=True, required=False)
    role = CharField(read_only=True, source="role_name", required=False)
    # X role = CharListField(read_only=True) unused


class OPDS2BelongsToObjectSerializer(Serializer):
    """BelongsTo Series Field."""

    name = CharField(read_only=True)
    position = IntegerField(read_only=True, required=False)
    links = OPDS2LinkListField(read_only=True)


class OPDS2BelongsTo(Serializer):
    """BelongsTo metadata field."""

    collection = ListField(
        child=OPDS2BelongsToObjectSerializer(read_only=True),
        read_only=True,
        required=False,
    )
    series = ListField(
        child=OPDS2BelongsToObjectSerializer(read_only=True),
        read_only=True,
        required=False,
    )
    story_arc = ListField(
        child=OPDS2BelongsToObjectSerializer(read_only=True),
        read_only=True,
        required=False,
    )
    https://readium.org/webpub-manifest/schema/metadata.schema.json
    """

    vars()["@type"] = CharField(read_only=True, default="https://schema.org/ComicIssue")
    conforms_to = CharField(
        read_only=True,
        default="https://readium.org/webpub-manifest/schema/metadata.schema.json",
    )
    # X sort_as = CharField(read_only=True, required=False)
    # X alt_identifier = CharField(read_only=True, required=False)
    # X accessibility = OPDS2Accessibility(read_only=True, required=False)
    published = DateField(read_only=True, required=False)
    # reading_progression = ChoiceField() unused

    #####################
    # Extended metadata #
    #####################
    language = CharField(read_only=True, required=False)
    author = OPDS2ContributorSerializer(many=True, required=False)
    translator = OPDS2ContributorSerializer(many=True, required=False)
    editor = OPDS2ContributorSerializer(many=True, required=False)
    artist = OPDS2ContributorSerializer(many=True, required=False)
    illustrator = OPDS2ContributorSerializer(many=True, required=False)
    letterer = OPDS2ContributorSerializer(many=True, required=False)
    penciller = OPDS2ContributorSerializer(many=True, required=False)
    colorist = OPDS2ContributorSerializer(many=True, required=False)
    inker = OPDS2ContributorSerializer(many=True, required=False)
    narrator = OPDS2ContributorSerializer(many=True, required=False)
    contributor = OPDS2ContributorSerializer(many=True, required=False)
    publisher = CharField(read_only=True, required=False)
    imprint = CharField(read_only=True, required=False)
    subject = OPDS2SubjectSerializer(many=True, required=False)
    layout = CharField(read_only=True, required=False)
    reading_progression = CharField(read_only=True, required=False)  # choice field
    # X duration = IntegerField(read_only=True, required=False)
    belongs_to = OPDS2BelongsTo(required=False)
    # X contains = OPDS2Contains(required=False)
    # X tdm = OPDS2TDM(required=False)


class OPDS2PublicationSerializer(OPDS2FacetSerializer):
    """
    Publication.

    https://drafts.opds.io/schema/publication.schema.json
    """

    conforms_to = CharField(
        read_only=True, default="https://drafts.opds.io/schema/publication.schema.json"
    )
    metadata = OPDS2PublicationMetadataSerializer(read_only=True)  # pyright: ignore[reportIncompatibleUnannotatedOverride]
    links = OPDS2LinkListField(read_only=True)
    images = OPDS2LinkListField(read_only=True, required=False)


class OPDS2PublicationDivinaMetadataSerializer(OPDS2PublicationMetadataSerializer):
    """
    Divina Visual Narratives Metadata.

    https://readium.org/webpub-manifest/profiles/divina
    """

    conforms_to = CharField(
        read_only=True, default="https://readium.org/webpub-manifest/profiles/divina"
    )


class OPDS2PublicationDivinaManifestSerializer(OPDS2PublicationSerializer):
    """
    Divina Visual Narratives Profile.
https://readium.org/webpub-manifest/profiles/divina """ vars()["@context"] = CharField( read_only=True, default="https://readium.org/webpub-manifest/context.jsonld" ) conforms_to = CharField( read_only=True, default="https://readium.org/webpub-manifest/profiles/divina" ) metadata = OPDS2PublicationDivinaMetadataSerializer(read_only=True) # pyright: ignore[reportIncompatibleUnannotatedOverride] reading_order = OPDS2LinkListField(read_only=True, required=False) # X resources = OPDS2LinkListField(read_only=True, required=False) # X toc = OPDS2LinkListField(read_only=True, required=False) # X landmarks = OPDS2LinkListField(read_only=True, required=False) # X page_list = OPDS2LinkListField(read_only=True, required=False) ================================================ FILE: codex/serializers/opds/v2/unused.py ================================================ """Unused OPDS v2 Serializers.""" from typing import Any, override from rest_framework.fields import CharField, DateTimeField, DecimalField, IntegerField from rest_framework.serializers import ChoiceField, Serializer from codex.serializers.opds.v2.links import OPDS2LinkListField class RecursiveField(Serializer): """ A recursive field type. There is a more comprehensive solution at https://pypi.org/project/djangorestframework-recursive/ """ @override def to_representation(self, instance) -> Any: """Represent with own class.""" parent = self.parent # Should not be ignored but is currently unused serializer = parent.parent.__class__(instance, context=self.context) return serializer.data class OPDS2PriceSerializer(Serializer): """ Prices. https://drafts.opds.io/schema/properties.schema.json """ value = DecimalField(read_only=True, max_digits=10, decimal_places=2) # by schema this should be a choices for allowed currencies. currency = CharField(read_only=True, max_length=3) class OPDS2HoldsSerializer(Serializer): """ Holds. https://drafts.opds.io/schema/properties.schema.json """ total = IntegerField(read_only=True) position = IntegerField(read_only=True) class OPDS2CopiesSerializer(Serializer): """ Copies. https://drafts.opds.io/schema/properties.schema.json """ total = IntegerField(read_only=True) available = IntegerField(read_only=True) class OPDS2AcquisitionObjectSerializer(Serializer): """ Acquisition Object. https://drafts.opds.io/schema/acquisition-object.schema.json """ type = CharField(read_only=True) child = RecursiveField(many=True, required=False) class OPDS2ProfileSerializer(Serializer): """ Profile. https://drafts.opds.io/schema/profile.schema.json """ name = CharField(read_only=True) email = CharField(read_only=True) links = OPDS2LinkListField(read_only=True, required=False) loans = OPDS2CopiesSerializer(read_only=True) holds = OPDS2HoldsSerializer(read_only=True) class OPDS2AvailabilitySerializer(Serializer): """ Availability. 
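
    An illustrative, non-normative example (assumed from the fields below):

        {"state": "available", "since": "...", "until": "..."}
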
    https://drafts.opds.io/schema/properties.schema.json
    """

    CHOICES = ("available", "unavailable", "reserved", "ready")

    state = ChoiceField(
        choices=CHOICES,
        read_only=True,
    )
    since = DateTimeField(read_only=True, required=False)
    until = DateTimeField(read_only=True, required=False)


================================================
FILE: codex/serializers/reader.py
================================================
"""Codex Reader Serializers."""

from typing import override

from rest_framework.fields import (
    BooleanField,
    CharField,
    DecimalField,
    DictField,
    IntegerField,
    ListField,
)
from rest_framework.serializers import Serializer

from codex.serializers.fields import (
    FitToField,
    ReadingDirectionField,
    TimestampField,
)
from codex.serializers.fields.reader import ArcGroupField
from codex.serializers.mixins import JSONFieldSerializer
from codex.serializers.route import RouteSerializer


class ReaderSettingsSerializer(Serializer):
    """Reader settings the user can change."""

    fit_to = FitToField(allow_blank=True, required=False)
    two_pages = BooleanField(allow_null=True, required=False)
    reading_direction = ReadingDirectionField(
        allow_blank=True, allow_null=True, required=False
    )
    read_rtl_in_reverse = BooleanField(allow_null=True, required=False)
    finish_on_last_page = BooleanField(allow_null=True, required=False)
    mtime = TimestampField(read_only=True)
    page_transition = BooleanField(allow_null=True, required=False)
    cache_book = BooleanField(allow_null=True, required=False)


class ReaderScopedUpdateSerializer(ReaderSettingsSerializer):
    """Input for scoped settings PATCH."""

    scope = CharField(required=True)
    scope_pk = IntegerField(required=False, allow_null=True, default=None)


class ReaderBookmarkSerializer(Serializer):
    """Bookmark data for reader (page progress)."""

    page = IntegerField(read_only=True, required=False)
    finished = BooleanField(read_only=True, required=False)


class ReaderComicSerializer(Serializer):
    """Prev, Next and Current Comic info."""

    pk = IntegerField(read_only=True)
    settings = ReaderSettingsSerializer(read_only=True)
    bookmark = ReaderBookmarkSerializer(read_only=True, required=False, allow_null=True)
    max_page = IntegerField(read_only=True)
    reading_direction = ReadingDirectionField(read_only=True)
    mtime = TimestampField(read_only=True)
    has_metadata = BooleanField(read_only=True)


class ReaderArcInfoSerializer(Serializer):
    """Information about the current Arc."""

    name = CharField(read_only=True)
    mtime = TimestampField(read_only=True)


class ReaderSelectedArcSerializer(Serializer):
    """An arc key serializer."""

    group = ArcGroupField(required=False)
    ids = ListField(child=IntegerField(), required=False)
    index = IntegerField(read_only=True, required=False)
    count = IntegerField(read_only=True, required=False)


class ReaderViewInputSerializer(JSONFieldSerializer):
    """Input for the reader serializer."""

    JSON_FIELDS = frozenset({"arc"})

    arc = ReaderSelectedArcSerializer(required=False)


class ReaderCurrentComicSerializer(ReaderComicSerializer):
    """Current comic only Serializer."""

    # For title
    series_name = CharField(read_only=True, required=False)
    volume_name = CharField(read_only=True, required=False)
    volume_number_to = CharField(read_only=True, required=False)
    issue_number = DecimalField(
        max_digits=None,
        decimal_places=3,
        read_only=True,
        coerce_to_string=False,
        required=False,
    )
    issue_suffix = CharField(read_only=True, required=False)
    issue_count = IntegerField(read_only=True, required=False)
    file_type = CharField(read_only=True, required=False)
    filename = CharField(read_only=True, required=False)
    name = CharField(read_only=True, required=False)


class ReaderBooksSerializer(Serializer):
    """All comics relevant to the reader."""

    current = ReaderCurrentComicSerializer(read_only=True)
    prev = ReaderComicSerializer(read_only=True, required=False)
    next = ReaderComicSerializer(read_only=True, required=False)


class ArcsIdsField(DictField):
    """Arcs Ids level."""

    @override
    def to_representation(self, value):
        """Serialize the ids to a string."""
        string_keyed_map = {}
        for ids, arc_info in value.items():
            string_key = ",".join(str(pk) for pk in ids)
            string_keyed_map[string_key] = arc_info
        return super().to_representation(string_keyed_map)

    child = ReaderArcInfoSerializer(read_only=True)


class ArcsField(DictField):
    """Arcs Field."""

    child = ArcsIdsField(read_only=True)


class ReaderComicsSerializer(Serializer):
    """Books and arcs."""

    arcs = ArcsField(read_only=True)
    arc = ReaderSelectedArcSerializer(read_only=True)
    books = ReaderBooksSerializer(read_only=True)
    close_route = RouteSerializer(read_only=True)
    mtime = TimestampField(read_only=True)


================================================
FILE: codex/serializers/redirect.py
================================================
"""Redirect serializers."""

from rest_framework.fields import CharField
from rest_framework.serializers import Serializer

from codex.serializers.browser.settings import BrowserSettingsSerializer
from codex.serializers.route import RouteSerializer


class ReaderRedirectSerializer(Serializer):
    """Reader 404 message."""

    reason = CharField(read_only=True)
    route = RouteSerializer(read_only=True)


class BrowserRedirectSerializer(ReaderRedirectSerializer):
    """Redirect to another route."""

    settings = BrowserSettingsSerializer(read_only=True)


================================================
FILE: codex/serializers/route.py
================================================
"""Vue Route Serializer."""

from dataclasses import asdict
from typing import override

from rest_framework.fields import CharField, IntegerField
from rest_framework.serializers import Serializer

from codex.serializers.fields.group import BrowserRouteGroupField
from codex.serializers.fields.sanitized import SanitizedCharField
from codex.views.util import Route


class SimpleRouteSerializer(Serializer):
    """An abbreviated vue route for the browser."""

    group = BrowserRouteGroupField()
    pks = CharField()

    @override
    def to_representation(self, instance) -> dict:
        """Allow submission of sequences instead of strings for pks."""
        instance = asdict(instance) if isinstance(instance, Route) else dict(instance)
        pks = instance["pks"]
        if not pks:
            instance["pks"] = "0"
        elif not isinstance(pks, str):
            instance["pks"] = ",".join(str(pk) for pk in sorted(pks))
        return super().to_representation(instance)

    @override
    def to_internal_value(self, data) -> dict:
        """Convert pk strings to tuples."""
        instance = super().to_internal_value(data)
        try:
            pks = instance.get("pks")
            if isinstance(pks, str):
                pks = tuple(sorted(int(pk) for pk in pks.split(",")))
            if 0 in pks:
                pks = ()
            instance["pks"] = tuple(pks)
        except ValueError:
            instance["pks"] = ()
        return instance


class RouteSerializer(SimpleRouteSerializer):
    """A vue route for the browser."""

    page = IntegerField()
    name = SanitizedCharField(allow_blank=True, required=False)


================================================
FILE: codex/serializers/settings.py
================================================
"""Settings Serializer."""

from rest_framework.serializers import ListSerializer

from codex.serializers.fields.settings import SettingsKeyField
from codex.serializers.mixins import JSONFieldSerializer


class SettingsInputSerializer(JSONFieldSerializer):
    """For requesting settings."""

    JSON_PARAMS = frozenset({"only"})

    only = ListSerializer(child=SettingsKeyField(), required=False)


================================================
FILE: codex/serializers/versions.py
================================================
"""Versions Serializer."""

from rest_framework.fields import CharField
from rest_framework.serializers import Serializer


class VersionsSerializer(Serializer):
    """Codex version information."""

    installed = CharField(read_only=True)
    latest = CharField(read_only=True)
    warning = CharField(read_only=True)


================================================
FILE: codex/settings/README.md
================================================
# codex.settings

I moved the functions that service the Django settings file into their own
module to improve the readability of the settings file.


================================================
FILE: codex/settings/__init__.py
================================================
"""
Django settings for codex project.

Generated by 'django-admin startproject' using Django 3.0.3.

For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""

from os import environ
from pathlib import Path
from types import MappingProxyType

from comicbox.config import get_config
from comicbox.config.frozenattrdict import FrozenAttrDict
from django.utils.csp import (  # pyright: ignore[reportMissingImports], # ty: ignore[unresolved-import]
    CSP,
)
from loguru import logger

from codex.settings.config import (
    get_bool,
    get_float,
    get_int,
    get_str,
    load_codex_config,
)
from codex.settings.logging import get_logging_settings
from codex.settings.secret_key import get_secret_key
from codex.settings.servestatic import immutable_file_test
from codex.settings.timezone import get_time_zone

###########################
# Constants & Env Helpers #
###########################
FALSY = frozenset({None, "", "false", "0", False, "False"})


def not_falsy_env(name):
    """Return a boolean for an environment variable, mindful of falsy values."""
    return bool(environ.get(name, "").lower() not in FALSY)


##############
# Base Paths #
##############
BASE_DIR = Path(__file__).resolve().parent.parent.parent
CODEX_PATH = BASE_DIR / "codex"
CONFIG_PATH = Path(environ.get("CODEX_CONFIG_DIR", Path.cwd() / "config"))

#####################
# Basic Environment #
#####################
# SECURITY WARNING: don't run with debug turned on in production!
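# Illustrative sketch of the helper above (an added note, assumed values):
# unset, "", "0", and any casing of "false" are falsy; everything else is truthy:
#   DEBUG=1     -> not_falsy_env("DEBUG") is True
#   DEBUG=False -> not_falsy_env("DEBUG") is False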
DEBUG = not_falsy_env("DEBUG") BUILD = not_falsy_env("BUILD") VITE_HOST = environ.get("VITE_HOST") TZ = environ.get("TIMEZONE", environ.get("TZ")) DOCKER_IMAGE_DEPRECATED = environ.get("DOCKER_IMAGE_DEPRECATED", "") ########################## # Codex TOML Config Load # ########################## CODEX_CONFIG_TOML = CONFIG_PATH / "codex.toml" CODEX_CONFIG_TOML_DEFAULT = CODEX_PATH / "settings/codex.toml.default" CODEX_CONFIG = load_codex_config(CODEX_CONFIG_TOML, CODEX_CONFIG_TOML_DEFAULT) ################################### # Codex Config: Server (Granian) # ################################### GRANIAN_HOST = get_str(CODEX_CONFIG, "server.host", default="0.0.0.0") # noqa: S104 GRANIAN_PORT = get_int(CODEX_CONFIG, "server.port", default=9810) GRANIAN_WORKERS = get_int(CODEX_CONFIG, "server.workers", default=1) GRANIAN_HTTP = get_str(CODEX_CONFIG, "server.http", default="auto") GRANIAN_WEBSOCKETS = get_bool(CODEX_CONFIG, "server.websockets", default=True) GRANIAN_URL_PATH_PREFIX = get_str(CODEX_CONFIG, "server.url_path_prefix", default="") ############################## # Codex Config: Logging # ############################## LOGLEVEL = get_str( CODEX_CONFIG, "logging.loglevel", default="TRACE" if DEBUG else "INFO" ) LOG_RETENTION = get_str(CODEX_CONFIG, "logging.log_retention", default="6 months") LOG_DIR = Path(environ.get("CODEX_LOG_DIR", CONFIG_PATH / "logs")) LOG_TO_CONSOLE = get_bool(CODEX_CONFIG, "logging.log_to_console", default=True) LOG_TO_FILE = get_bool(CODEX_CONFIG, "logging.log_to_file", default=True) LOG_PATH = LOG_DIR / "codex.log" LOG_ROTATION = "10 MB" ############################## # Codex Config: Auth # ############################## AUTH_REMOTE_USER = get_bool(CODEX_CONFIG, "auth.remote_user", default=False) ############################## # Codex Config: Browser # ############################## BROWSER_MAX_OBJ_PER_PAGE = get_int( CODEX_CONFIG, "browser.max_obj_per_page", default=100 ) ############################## # Codex Config: Throttle # ############################## THROTTLE_ANON = get_int(CODEX_CONFIG, "throttle.anon", default=0) THROTTLE_USER = get_int(CODEX_CONFIG, "throttle.user", default=0) THROTTLE_OPDS = get_int(CODEX_CONFIG, "throttle.opds", default=0) THROTTLE_OPENSEARCH = get_int(CODEX_CONFIG, "throttle.opensearch", default=0) ############################## # Codex Config: Importer # ############################## # iterator(chunk_size=...) with prefetch_related at delete/comics.py:48 # Limit: memory only # Previous: was working with 1000 IMPORTER_DELETE_MAX_CHUNK_SIZE = get_int( CODEX_CONFIG, "importer.delete_max_chunk_size", default=2000 ) # OR-chain Q()s in filters.py:32-43 and link/delete.py:37-41 # Limit: 990, 900 for safety. IMPORTER_FILTER_BATCH_SIZE = get_int( CODEX_CONFIG, "importer.filter_batch_size", default=900 ) # This was derived with experiments long ago IMPORTER_SEARCH_SYNC_BATCH_MEMORY_RATIO = get_float( CODEX_CONFIG, "importer.search_sync_batch_memory_ratio", default=3.2 ) # Batch size for slicing Comic paths into `filter(path__in=...)` queries. # SQLite caps at 32766 variables; 30000 leaves headroom for the `library` param. # path__in=batch_paths at links_fk.py:86 # Limit: 32760, 30000 is safe IMPORTER_LINK_FK_BATCH_SIZE = get_int( CODEX_CONFIG, "importer.link_fk_batch_size", default=30000 ) # iterator(chunk_size=...) 
over prefetch-heavy qs at links_m2m.py:89 # Limit: memory + prefetch IN subqueries IMPORTER_LINK_M2M_BATCH_SIZE = get_int( CODEX_CONFIG, "importer.link_m2m_batch_size", default=20000 ) # Comic bulk_update uses CASE WHEN pk=? THEN ? per field (2 params/field/row) # plus 1 pk/row in WHERE IN. SQLite caps at 32766 variables; Django's # auto-batching undercounts. 400 * (2*36 + 1) = 29200 leaves headroom. # Limit: 448, 400 is safe. IMPORTER_UPDATE_COMIC_BATCH_SIZE = get_int( CODEX_CONFIG, "importer.update_comic_batch_size", default=400 ) ############################## # Codex Config: Debug # ############################## DEBUG_LOG_AUTH_HEADERS = get_bool(CODEX_CONFIG, "debug.log_auth_headers", default=False) DEBUG_LOG_REQUEST = get_bool(CODEX_CONFIG, "debug.log_request", default=False) DEBUG_LOG_RESPONSE_TIME = get_bool( CODEX_CONFIG, "debug.log_response_time", default=False ) DEBUG_SLOW_QUERY_LIMIT = get_float(CODEX_CONFIG, "debug.slow_query_limit", default=0.5) ############################## # Codex Env: Repair Flags # ############################## RESET_ADMIN = not_falsy_env("CODEX_RESET_ADMIN") FIX_FOREIGN_KEYS = not_falsy_env("CODEX_FIX_FOREIGN_KEYS") INTEGRITY_CHECK = not_falsy_env("CODEX_INTEGRITY_CHECK") FTS_INTEGRITY_CHECK = not_falsy_env("CODEX_FTS_INTEGRITY_CHECK") FTS_REBUILD = not_falsy_env("CODEX_FTS_REBUILD") ############################################### # # # Django Settings # # # ############################################### ############ # Security # ############ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = get_secret_key(CONFIG_PATH) ALLOWED_HOSTS = ["*"] CORS_ALLOW_CREDENTIALS = True SECURE_CSP = { "default-src": [CSP.SELF], "script-src": [ CSP.SELF, CSP.NONCE, "https://cdn.jsdelivr.net/npm/swagger-ui-dist@latest/swagger-ui-bundle.js", "https://cdn.jsdelivr.net/npm/swagger-ui-dist@latest/swagger-ui-standalone-preset.js", ], "style-src": [ CSP.SELF, # Titanic amount of work to make this safe with vite CSP.UNSAFE_INLINE, "https://cdn.jsdelivr.net/npm/swagger-ui-dist@latest/swagger-ui.css", ], "img-src": [ "data:", CSP.SELF, "https://cdn.jsdelivr.net/npm/swagger-ui-dist@latest/favicon-32x32.png", ], "connect-src": [ CSP.SELF, "ws:", "wss:", "https://cdn.jsdelivr.net/npm/swagger-ui-dist@latest/swagger-ui.css.map", ], } # Session SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" SESSION_COOKIE_AGE = 60 * 60 * 24 * 60 # 60 days # Proxy headers USE_X_FORWARDED_HOST = True SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") # Debug INTERNAL_IPS = ("127.0.0.1",) ########### # Logging # ########### LOGGING = get_logging_settings(LOGLEVEL, debug=DEBUG) ###################### # Installed Apps # ###################### def _get_installed_apps() -> tuple: installed_apps = [ "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", ] if DEBUG: # comes before static apps installed_apps += ["nplusone.ext.django", "schema_graph"] installed_apps += [ "servestatic.runserver_nostatic", "django.contrib.staticfiles", "rest_framework", "rest_framework.authtoken", "rest_registration", "corsheaders", ] if not BUILD: installed_apps += [ "django_vite", ] installed_apps += [ "codex", "cachalot", "drf_spectacular", ] return tuple(installed_apps) INSTALLED_APPS = _get_installed_apps() ############## # Middleware # ############## def _get_middleware() -> tuple: middleware = [ "corsheaders.middleware.CorsMiddleware", "django.middleware.security.SecurityMiddleware", 
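        # Illustrative ordering note (an assumption, not from the source):
        # CorsMiddleware is kept first so CORS headers are applied before any
        # later middleware can short-circuit a response; ServeStaticMiddleware
        # follows SecurityMiddleware, matching the usual WhiteNoise-style
        # placement for serving static files early.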
"servestatic.middleware.ServeStaticMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.middleware.csrf.CsrfViewMiddleware", "django.middleware.csp.ContentSecurityPolicyMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", ] if AUTH_REMOTE_USER: middleware += ["codex.authentication.HttpRemoteUserMiddleware"] middleware += [ "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", "codex.middleware.CodexMiddleware", ] if DEBUG: middleware += [ "nplusone.ext.django.NPlusOneMiddleware", ] if DEBUG_LOG_RESPONSE_TIME: middleware += [ "codex.middleware.LogResponseTimeMiddleware", ] if DEBUG_LOG_REQUEST: middleware += [ "codex.middleware.LogRequestMiddleware", ] return tuple(middleware) MIDDLEWARE = _get_middleware() if DEBUG: NPLUSONE_LOGGER = logger NPLUSONE_LOG_LEVEL = "WARNING" ######################## # URL & WSGI Routing # ######################## ROOT_URLCONF = "codex.urls.root" WSGI_APPLICATION = "codex.wsgi.application" ############# # Templates # ############# CODEX_TEMPLATES = CODEX_PATH / "templates" _DEV_LOADERS = MappingProxyType( { "loaders": [ # Explicitly use non-cached loaders in dev to prevent handle retention ("django.template.loaders.filesystem.Loader",), ("django.template.loaders.app_directories.Loader",), ] } ) _TEMPLATES_LOADERS = _DEV_LOADERS if DEBUG else {} TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [CODEX_TEMPLATES], "APP_DIRS": not DEBUG, "OPTIONS": { **_TEMPLATES_LOADERS, "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.csp", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] ############ # Database # ############ DB_PATH = CONFIG_PATH / "codex.sqlite3" # Migrate old DB location OLD_DB_PATH = CONFIG_PATH / "db.sqlite3" if not DB_PATH.exists() and OLD_DB_PATH.exists(): OLD_DB_PATH.rename(DB_PATH) BACKUP_DB_DIR = CONFIG_PATH / "backups" BACKUP_DB_PATH = (BACKUP_DB_DIR / DB_PATH.stem).with_suffix(DB_PATH.suffix + ".bak") DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": DB_PATH, "CONN_MAX_AGE": 600, "OPTIONS": { "init_command": "PRAGMA journal_mode=wal;", "timeout": 120, }, }, } # The new DEFAULT_AUTO_FIELD in Django 3.2 is BigAutoField (64 bit), # but it can't be auto migrated. 
Automigration has been punted to # Django 4.0 at the earliest: # https://code.djangoproject.com/ticket/32674 DEFAULT_AUTO_FIELD = "django.db.models.AutoField" ################## # Authentication # ################## if AUTH_REMOTE_USER: AUTHENTICATION_BACKENDS = [ "django.contrib.auth.backends.RemoteUserBackend", "django.contrib.auth.backends.ModelBackend", ] # https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": ( "django.contrib.auth.password_validation.UserAttributeSimilarityValidator" ) }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", "OPTIONS": {"min_length": 4}, }, ] ######################## # Internationalization # ######################## LANGUAGE_CODE = "en-us" USE_I18N = True TIME_ZONE = get_time_zone(TZ) ################ # Static Files # ################ STATIC_ROOT = CODEX_PATH / "static" SERVESTATIC_IMMUTABLE_FILE_TEST = immutable_file_test SERVESTATIC_USE_ZSTD = False # only a win for dynamic files SERVESTATIC_STATIC_PREFIX = "/static" # otherwise is based on STATIC_URL STATIC_URL = GRANIAN_URL_PATH_PREFIX.rstrip("/") + SERVESTATIC_STATIC_PREFIX + "/" STORAGES = { "staticfiles": { # Don't use Manifest storage because vite does that for me and saves space. "BACKEND": "servestatic.storage.CompressedStaticFilesStorage", } } STATICFILES_DIRS = ( [CODEX_PATH / "static_src", CODEX_PATH / "static_build"] if (DEBUG or BUILD) else [] ) for path in STATICFILES_DIRS: path.mkdir(exist_ok=True, parents=True) ######### # Cache # ######### ROOT_CACHE_PATH = CONFIG_PATH / "cache" DEFAULT_CACHE_PATH = ROOT_CACHE_PATH / "default" DEFAULT_CACHE_PATH.mkdir(exist_ok=True, parents=True) CACHES = { "default": { "BACKEND": "django.core.cache.backends.filebased.FileBasedCache", "LOCATION": str(DEFAULT_CACHE_PATH), }, } ################## # REST Framework # ################## _THROTTLE_MAP = MappingProxyType( { "anon": ("rest_framework.throttling.AnonRateThrottle", THROTTLE_ANON), "user": ("rest_framework.throttling.UserRateThrottle", THROTTLE_USER), "opds": ("rest_framework.throttling.ScopedRateThrottle", THROTTLE_OPDS), "opensearch": ( "rest_framework.throttling.ScopedRateThrottle", THROTTLE_OPENSEARCH, ), } ) _THROTTLE_CLASSES = set() _THROTTLE_RATES = {} for scope, value in _THROTTLE_MAP.items(): classname, rate_value = value if rate_value or classname == "rest_framework.throttling.ScopedRateThrottle": _THROTTLE_CLASSES.add(classname) rate = f"{rate_value}/min" if value[1] else None _THROTTLE_RATES[scope] = rate _RENDERER_CLASSES = [ "djangorestframework_camel_case.render.CamelCaseJSONRenderer", ] if DEBUG: _RENDERER_CLASSES.append( "djangorestframework_camel_case.render.CamelCaseBrowsableAPIRenderer" ) REST_FRAMEWORK = { "DEFAULT_AUTHENTICATION_CLASSES": ( "rest_framework.authentication.SessionAuthentication", "codex.authentication.BearerTokenAuthentication", ), "DEFAULT_RENDERER_CLASSES": tuple(_RENDERER_CLASSES), "DEFAULT_PARSER_CLASSES": ( "djangorestframework_camel_case.parser.CamelCaseJSONParser", ), "DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema", "EXCEPTION_HANDLER": "codex.views.error.codex_exception_handler", "DEFAULT_THROTTLE_CLASSES": tuple(_THROTTLE_CLASSES), "DEFAULT_THROTTLE_RATES": _THROTTLE_RATES, } ##################### # REST Registration # ##################### REST_REGISTRATION = { "REGISTER_VERIFICATION_ENABLED": False, "REGISTER_EMAIL_VERIFICATION_ENABLED": False, "RESET_PASSWORD_VERIFICATION_ENABLED": False, "USER_HIDDEN_FIELDS": ( # DEFAULT "last_login", "is_active", 
"user_permissions", "groups", "date_joined", # SHOWN # "is_staff", "is_superuser", # HIDDEN "email", "first_name", "last_name", ), } ##################### # API Documentation # ##################### SPECTACULAR_SETTINGS = { "TITLE": "Codex API", "DESCRIPTION": "Comic Library Browser and Reader", "OAS_VERSION": "3.1.0", "VERSION": "3.0.0", "CONTACT": { "name": "Support", "url": "https://github.com/ajslater/codex?tab=readme-ov-file#-support", }, "PREPROCESSING_HOOKS": ["codex.urls.spectacular.allow_list"], "SERVE_PERMISSIONS": ["rest_framework.permissions.IsAdminUser"], "EXTERNAL_DOCS": { "url": "https://codex-comic-reader.readthedocs.io/", "description": "Codex Docs", }, "ENUM_NAME_OVERRIDES": { # group "BrowseGroupEnum": "codex.serializers.fields.group.BrowseGroupField.class_choices", "BrowserRouteGroupEnum": "codex.serializers.fields.group.BrowserRouteGroupField.class_choices", # reading_direction "BookmarkReadingDirectionEnum": "codex.models.choices.ReadingDirectionChoices.choices", "ReaderReadingDirectionEnum": "codex.serializers.fields.reader.ReadingDirectionField.class_choices", "VuetifyReadingDirectionEnum": "codex.serializers.fields.vuetify.VuetifyReadingDirectionChoiceField.class_choices", }, } ############ # Channels # ############ CHANNEL_LAYERS = { "default": {"BACKEND": "channels.layers.InMemoryChannelLayer"}, } ############### # Django Vite # ############### if DEBUG and not BUILD: import socket DEV_SERVER_HOST = VITE_HOST or socket.gethostname() DJANGO_VITE = { "default": { "dev_mode": DEBUG, "dev_server_host": DEV_SERVER_HOST, } } CSP_SCRIPT_SRC = ("'self'", "http://localhost:5173", "'nonce'") CSP_CONNECT_SRC = ("'self'", "ws://localhost:5173", "http://localhost:5173") ############ # Cachalot # ############ CACHALOT_UNCACHABLE_TABLES = frozenset( {"django_migrations", "django_session", "codex_useractive"} ) ################# # Custom Covers # ################# CUSTOM_COVERS_SUBDIR = "custom-covers" CUSTOM_COVERS_DIR = CONFIG_PATH / CUSTOM_COVERS_SUBDIR CUSTOM_COVERS_GROUP_DIRS = frozenset( {"publishers", "imprints", "series", "volumes", "story-arcs"} ) def create_custom_cover_group_dirs() -> None: """Create custom cover group dirs.""" for group_dir in CUSTOM_COVERS_GROUP_DIRS: custom_cover_group_dir = CUSTOM_COVERS_DIR / group_dir custom_cover_group_dir.mkdir(exist_ok=True, parents=True) create_custom_cover_group_dirs() ############ # Comicbox # ############ COMICBOX_CONFIG = FrozenAttrDict( get_config( { "loglevel": LOGLEVEL, "delete_keys": frozenset( { # Only pages and reprints are optimized away for sure with comicbox 2.0.2 "alternate_images", "bookmark", "credit_primaries", "ext", "identifier_primary_source", "manga", "pages", "prices", "remainders", "reprints", "rights", "updated_at", } ), } ) ) ================================================ FILE: codex/settings/codex.toml.default ================================================ # Codex Configuration File # Copy to config/codex.toml and edit as needed. # Environment variables override values in this file. # See README.md for full documentation. # [server] # Granian ASGI server settings # host = "0.0.0.0" # port = 9810 # Number of worker processes. 1 is recommended for containerized environments. # workers = 1 # HTTP version: "auto", "1", or "2" # http = "auto" # Enable websockets (required for Codex live updates) # websockets = true # HTTP path prefix for codex (e.g. 
"/codex" for reverse proxy sub-path) # url_path_prefix = "" # [logging] # Log level: TRACE, DEBUG, INFO, SUCCESS, WARNING, ERROR, CRITICAL # loglevel = "INFO" # log_retention = "6 months" # log_to_console = true # log_to_file = true # [browser] # max_obj_per_page = 100 # [throttle] # Rate limiting (requests per minute). 0 = disabled. # anon = 0 # user = 0 # opds = 0 # opensearch = 0 # [auth] # Allows authentication without authorization via the Remote-User header. # Only enable if you have authorization in front of Codex. Dangerous. # remote_user = false ================================================ FILE: codex/settings/config.py ================================================ """Load and merge the unified codex.toml config with environment variable overrides.""" import shutil import tomllib from collections.abc import Mapping, MutableMapping from os import environ from pathlib import Path from loguru import logger from codex.settings.hypercorn_migrate import migrate_hypercorn_config # Environment variable name to TOML key path mapping # Keys listed here can be overridden by setting the corresponding env var. _ENV_OVERRIDES: dict[str, str] = { # Server "GRANIAN_HOST": "server.host", "GRANIAN_PORT": "server.port", "GRANIAN_WORKERS": "server.workers", "GRANIAN_HTTP": "server.http", "GRANIAN_WEBSOCKETS": "server.websockets", "GRANIAN_URL_PATH_PREFIX": "server.url_path_prefix", # Logging "LOGLEVEL": "logging.loglevel", "CODEX_LOG_RETENTION": "logging.log_retention", "LOG_RETENTION": "logging.log_retention", # Old "CODEX_LOG_TO_CONSOLE": "logging.log_to_console", "CODEX_LOG_TO_FILE": "logging.log_to_file", # Import "CODEX_IMPORTER_LINK_FK_BATCH_SIZE": "importer.link_fk_batch_size", "CODEX_IMPORTER_LINK_M2M_BATCH_SIZE": "importer.link_m2m_batch_size", "CODEX_IMPORTER_DELETE_MAX_CHUNK_SIZE": "importer.delete_max_chunk_size", "CODEX_IMPORTER_SEARCH_SYNC_BATCH_MEMORY_RATIO": "importer.search_sync_batch_memory_ratio", "CODEX_IMPORTER_FILTER_BATCH_SIZE": "browser.filter_batch_size", # Importer Old "CODEX_LINK_FK_BATCH_SIZE": "importer.link_fk_batch_size", # Old "CODEX_LINK_M2M_BATCH_SIZE": "importer.link_m2m_batch_size", # Old "CODEX_DELETE_MAX_CHUNK_SIZE": "importer.delete_max_chunk_size", # Old "CODEX_SEARCH_SYNC_BATCH_MEMORY_RATIO": "importer.search_sync_batch_memory_ratio", # Old "CODEX_FILTER_BATCH_SIZE": "browser.filter_batch_size", # Old # Browser "CODEX_BROWSER_MAX_OBJ_PER_PAGE": "browser.max_obj_per_page", "CODEX_MAX_OBJ_PER_PAGE": "browser.max_obj_per_page", # Old # Throttle "CODEX_THROTTLE_ANON": "throttle.anon", "CODEX_THROTTLE_USER": "throttle.user", "CODEX_THROTTLE_OPDS": "throttle.opds", "CODEX_THROTTLE_OPENSEARCH": "throttle.opensearch", # Auth "CODEX_AUTH_REMOTE_USER": "auth.remote_user", # Debug "CODEX_DEBUG_LOG_AUTH_HEADERS": "debug.log_auth_headers", "CODEX_DEBUG_SLOW_QUERY_LIMIT": "debug.slow_query_limit", "CODEX_DEBUG_LOG_RESPONSE_TIME": "debug.log_response_time", "CODEX_DEBUG_LOG_REQUEST": "debug.log_request", # Old Debug "CODEX_LOG_AUTH_HEADERS": "debug.log_auth_headers", "CODEX_SLOW_QUERY_LIMIT": "debug.slow_query_limit", "CODEX_LOG_RESPONSE_TIME": "debug.log_response_time", "CODEX_LOG_REQUEST": "debug.log_request", } def _deep_get(data: Mapping, keypath: str, default=None): """Traverse nested dicts by a tuple of keys.""" current = data keys = keypath.split(".") for key in keys: if not isinstance(current, Mapping): return default current = current.get(key) if current is None: return default return current def _deep_set(data: MutableMapping, keypath: str, value) -> None: 
"""Set a value in a nested dict, creating intermediate dicts as needed.""" keys = keypath.split(".") for key in keys[:-1]: data = data.setdefault(key, {}) data[keys[-1]] = value def _ensure_config(config_toml: Path, config_toml_default: Path) -> None: """Ensure that a valid config exists.""" if not config_toml.exists(): migrate_hypercorn_config(config_toml, config_toml_default) if not config_toml.exists(): config_toml.parent.mkdir(parents=True, exist_ok=True) shutil.copy(config_toml_default, config_toml) logger.info(f"Copied default config to {config_toml}") def _apply_env_overrides(config: MutableMapping) -> None: """Apply environment variable overrides on top of the TOML values.""" for env_name, keypath in _ENV_OVERRIDES.items(): env_val = environ.get(env_name) if env_val is not None: _deep_set(config, keypath, env_val) def load_codex_config(config_toml: Path, config_toml_default: Path) -> Mapping: """Load the unified codex config from TOML, then overlay env vars.""" _ensure_config(config_toml, config_toml_default) with config_toml.open("rb") as fh: config = tomllib.load(fh) _apply_env_overrides(config) return config # Convenience typed accessors def get_str(config: Mapping, keypath: str, default: str = "") -> str: """Get a string value from the config.""" val = _deep_get(config, keypath, default) return str(val) if val is not None else default def get_int(config: Mapping, keypath: str, default: int = 0) -> int: """Get an integer value from the config.""" val = _deep_get(config, keypath, default) return int(val) # pyright: ignore[reportArgumentType] def get_float(config: Mapping, keypath: str, default: float = 0.0) -> float: """Get a float value from the config.""" val = _deep_get(config, keypath, default) return float(val) # pyright: ignore[reportArgumentType] def get_bool(config: Mapping, keypath: str, *, default: bool = False) -> bool: """Get a boolean value from the config.""" val = _deep_get(config, keypath) if val is None: return default if isinstance(val, bool): return val if isinstance(val, str): return val.lower() not in {"", "false", "0", "no"} return bool(val) ================================================ FILE: codex/settings/hypercorn_migrate.py ================================================ """Migrate hypercorn.toml to codex.toml.""" import tomllib from pathlib import Path from typing import Any from loguru import logger HYPERCORN_FN = "hypercorn.toml" DEFAULT_CONFIG_HEAD_COUNT = 5 DEFAULT_CONFIG_TAIL_START = 18 # Default bind used to detect the common case. _DEFAULT_HOST = "0.0.0.0" # noqa: S104 _DEFAULT_PORT = 9810 _DEFAULT_WORKERS = 1 _DEFAULT_URL_PATH_PREFIX = "" def _parse_bind(bind_list: list[str]) -> tuple[str, int]: """ Extract (host, port) from a hypercorn bind list. Entries look like "0.0.0.0:9810" or "unix:/path". Takes the first TCP entry found. 
""" for raw_entry in bind_list: entry = raw_entry.strip() if entry.startswith(("unix:", "/")): continue if ":" in entry: host, _, port_str = entry.rpartition(":") host = host.strip("[]") # IPv6 bracket notation return host, int(port_str) return _DEFAULT_HOST, _DEFAULT_PORT def _toml_value(val: object) -> str: """Format a Python value as a TOML literal.""" if isinstance(val, bool): return "true" if val else "false" if isinstance(val, int): return str(val) if isinstance(val, float): return f"{val}" if isinstance(val, str): return f'"{val}"' if isinstance(val, list): inner = ", ".join(_toml_value(v) for v in val) return f"[{inner}]" return f'"{val}"' def _transform_hypercorn_config(old: dict): host = _DEFAULT_HOST port = _DEFAULT_PORT if "bind" in old: raw = old["bind"] host, port = _parse_bind(raw if isinstance(raw, list) else [str(raw)]) old["host"] = host try: old["port"] = int(port) except ValueError: old["port"] = _DEFAULT_PORT try: workers = int(old.get("workers", 1)) except ValueError: workers = 1 old["workers"] = workers old["root_path"] = old.get("root_path", "") def _build_codex_toml_line(lines: list[str], key: str, value, default): line = "# " if not value or value == default else "" if isinstance(value, str): value = '"' + value + '"' line += f"{key} = {value}" lines.append(line) def _append_granian_ssl_comment(lines: list[str], old: dict[str, Any]): # Just in case someone got way too clever. certfile = old.get("certfile") keyfile = old.get("keyfile") if certfile or keyfile: lines += [ "", "# TLS was configured in hypercorn.toml.", "# Granian uses environment variables instead:", ] if certfile: lines.append(f'# GRANIAN_SSL_CERTIFICATE="{certfile}"') if keyfile: lines.append(f'# GRANIAN_SSL_KEYFILE="{keyfile}"') def _build_codex_toml(old: dict, default_toml: Path) -> str: """Convert a parsed hypercorn config dict into codex.toml text.""" _transform_hypercorn_config(old) with default_toml.open() as f: default_lines = f.readlines() config_head = [ line.strip() for line in default_lines[:DEFAULT_CONFIG_HEAD_COUNT] ] config_tail = [ line.strip() for line in default_lines[DEFAULT_CONFIG_TAIL_START:] ] # Head of default config file lines: list[str] = config_head host = old["host"] port = old["port"] workers = old["workers"] root_path = old["root_path"] server_line = ( "# " if ( all( ( not host or host == _DEFAULT_HOST, not port or port == _DEFAULT_PORT, not workers or workers == _DEFAULT_WORKERS, not root_path or root_path == _DEFAULT_URL_PATH_PREFIX, ) ) ) else "" ) server_line += "[server]" lines += [server_line, "# Granian ASGI server settings"] _build_codex_toml_line(lines, "host", host, _DEFAULT_HOST) _build_codex_toml_line(lines, "port", port, _DEFAULT_PORT) # Just in case someone got clever lines.append( "# Number of worker processes. 1 is recommended for containerized environments." 
) _build_codex_toml_line(lines, "workers", workers, 1) lines += [ '# HTTP version: "auto", "1", or "2"', '# http = "auto"', "# Enable websockets (required for Codex live updates)", "# websockets = true", ] _build_codex_toml_line(lines, "url_path_prefix", root_path, "") # Tail of the default config files lines += ["", *config_tail] _append_granian_ssl_comment(lines, old) lines.append("") return "\n".join(lines) def migrate_hypercorn_config(codex_toml: Path, default_toml: Path) -> None: """Convert hypercorn.toml to codex.toml.""" if codex_toml.exists(): return hypercorn_toml = codex_toml.parent / HYPERCORN_FN if not hypercorn_toml.exists(): return with hypercorn_toml.open("rb") as fh: old = tomllib.load(fh) if not old: return codex_toml.write_text(_build_codex_toml(old, default_toml), encoding="utf-8") backup = hypercorn_toml.with_suffix(".toml.bak") hypercorn_toml.rename(backup) logger.info(f"Migrated {hypercorn_toml} to {codex_toml} (backup: {backup})") ================================================ FILE: codex/settings/logging.py ================================================ """Logging Settings.""" from logging import Handler from typing import override from loguru import logger class LoguruHandler(Handler): """Redirect logging to loguru.""" @override def emit(self, record): """Emit loguru logs.""" try: level = logger.level(record.levelname).name except Exception: level = record.levelno logger.opt( depth=6, exception=record.exc_info, ).log(level, record.getMessage()) def get_logging_settings(loglevel: str | int, *, debug: bool) -> dict[str, int | dict]: """Get logging for settings.""" loggers: dict[str, dict] = { "watchfiles.main": { # DEBUG logs from watchfiles include a 5 second timeout "level": "INFO", "propagate": False, }, } if loglevel != "TRACE": loggers.update( { "asyncio": { "level": "INFO", }, } ) if not debug: loggers.update( { "urllib3.connectionpool": {"level": "INFO"}, "PIL": { "level": "INFO", }, } ) level = "DEBUG" if loglevel == "TRACE" else loglevel return { "version": 1, "disable_existing_loggers": True, "handlers": { "loguru": { "class": "codex.settings.logging.LoguruHandler", }, }, "root": { "handlers": ["loguru"], "level": level, "propagate": True, }, "django": { "handlers": ["loguru"], "level": level, "propagate": True, }, "django.request": { "handlers": ["loguru"], "level": level, "propagate": True, }, "django.server": { "handlers": ["loguru"], "level": level, "propagate": True, }, "loggers": loggers, } ================================================ FILE: codex/settings/secret_key.py ================================================ """Manage the django secret key.""" from django.core.management.utils import get_random_secret_key def get_secret_key(config_path) -> str: """Get the secret key from a file or create and write it.""" secret_key_path = config_path / "secret_key" try: with secret_key_path.open("r") as scf: secret_key = scf.read().strip() except FileNotFoundError: with secret_key_path.open("w") as scf: secret_key = get_random_secret_key() scf.write(secret_key) return secret_key ================================================ FILE: codex/settings/servestatic.py ================================================ """Servestatic setup functions.""" import re IMF_RE = re.compile(r"^.+[.-][0-9a-zA-Z_-]{8,12}\..+$") def immutable_file_test(_path, url): """For django-vite.""" # Match filename with 12 hex digits before the extension # e.g. 
app.db8f2edc0c8a.js
    return IMF_RE.match(url)


================================================
FILE: codex/settings/timezone.py
================================================
"""Timezone settings functions."""

from tzlocal import get_localzone_name


def get_time_zone(tz):
    """Get the time zone from the tz value."""
    if tz and not tz.startswith(":") and "etc/localtime" not in tz and "/" in tz:
        time_zone = tz
    elif get_localzone_name():
        time_zone = get_localzone_name()
    else:
        time_zone = "Etc/UTC"
    return time_zone


================================================
FILE: codex/signals/__init__.py
================================================
"""OS and Django Signals."""


================================================
FILE: codex/signals/django_signals.py
================================================
"""Django signal actions."""

# from django.db.models.signals import m2m_changed unused


def connect_signals() -> None:
    """Connect actions to signals."""
    # connection_created.connect(_db_connect) unused atm
    # m2m_changed.connect(_user_group_change) unused


================================================
FILE: codex/signals/os_signals.py
================================================
"""OS Signal actions."""

import asyncio
import signal
from asyncio import Event
from sys import platform

from loguru import logger

STOP_SIGNAL_NAMES = (
    "SIGABRT",
    "SIGBREAK",
    "SIGBUS",
    "SIGHUP",
    "SIGINT",
    "SIGQUIT",
    "SIGSEGV",
    "SIGTERM",
    "SIGUSR2",
)
RESTART_SIGNAL_NAMES = ("SIGUSR1",)
RESTART_EVENT = Event()
SHUTDOWN_EVENT = Event()


def _shutdown_signal_handler(*_args) -> None:
    """Initiate Codex Shutdown."""
    if SHUTDOWN_EVENT.is_set():
        return
    logger.info("Asking granian to shut down gracefully. Could take 10 seconds...")
    SHUTDOWN_EVENT.set()


def _restart_signal_handler(*_args) -> None:
    """Initiate Codex Restart."""
    if RESTART_EVENT.is_set():
        return
    logger.info("Restart signal received.")
    RESTART_EVENT.set()
    _shutdown_signal_handler()


def bind_signals_to_loop_aux(sig_add, signal_names, handler) -> None:
    """Bind signal names to a handler."""
    for name in signal_names:
        if sig := getattr(signal, name, None):
            sig_add(sig, handler)


def bind_signals_to_loop() -> None:
    """Bind signals to the handlers."""
    try:
        if platform == "win32":
            sig_add = signal.signal
        else:
            loop = asyncio.get_running_loop()
            sig_add = loop.add_signal_handler
        bind_signals_to_loop_aux(sig_add, STOP_SIGNAL_NAMES, _shutdown_signal_handler)
        bind_signals_to_loop_aux(sig_add, RESTART_SIGNAL_NAMES, _restart_signal_handler)
    except NotImplementedError:
        logger.info("Shutdown and restart signal handling not implemented on Windows.")


================================================
FILE: codex/startup/__init__.py
================================================
"""Initialize Codex Database before running."""

from pathlib import Path

from django.contrib.auth.models import User
from django.core.cache import cache
from django.db.models import F, Q
from django.db.models.functions import Now
from loguru import logger
from rest_framework.authtoken.models import Token

from codex.choices.admin import AdminFlagChoices
from codex.librarian.status_controller import STATUS_DEFAULTS
from codex.models import AdminFlag, CustomCover, LibrarianStatus, Library, Timestamp
from codex.settings import (
    AUTH_REMOTE_USER,
    CODEX_CONFIG_TOML,
    CUSTOM_COVERS_DIR,
    CUSTOM_COVERS_SUBDIR,
    DEBUG,
    GRANIAN_URL_PATH_PREFIX,
    RESET_ADMIN,
)
from codex.startup.db import ensure_db_schema
from codex.startup.registration import patch_registration_setting


def ensure_superuser() -> None:
    """Ensure there is a valid
superuser.""" if RESET_ADMIN or not User.objects.filter(is_superuser=True).exists(): admin_user, created = User.objects.update_or_create( username="admin", defaults={"is_staff": True, "is_superuser": True, "is_active": True}, ) admin_user.set_password("admin") admin_user.save() prefix = "Cre" if created else "Upd" logger.success(f"{prefix}ated admin user.") def _delete_orphans(model, field, names) -> None: """Delete orphans for declared models.""" params = {f"{field}__in": names} query = model.objects.filter(~Q(**params)) count, _ = query.delete() if count: logger.info(f"Deleted {count} orphan {model._meta.verbose_name_plural}.") def init_admin_flags() -> None: """Init admin flag rows.""" _delete_orphans(AdminFlag, "key", AdminFlagChoices.values) for key, title in AdminFlagChoices.choices: defaults = {"key": key, "on": key not in AdminFlag.FALSE_DEFAULTS} flag, created = AdminFlag.objects.get_or_create(defaults=defaults, key=key) if created: logger.info(f"Created AdminFlag: {title} = {flag.on}") def init_timestamps() -> None: """Init timestamps.""" _delete_orphans(Timestamp, "key", Timestamp.Choices.values) for enum in Timestamp.Choices: key = enum.value ts, created = Timestamp.objects.get_or_create(key=key) if enum == Timestamp.Choices.API_KEY and not ts.version: ts.save_uuid_version() if created: label = Timestamp.Choices(ts.key).label logger.debug(f"Created {label} timestamp.") def init_librarian_statuses() -> None: """Init librarian statuses.""" # Remove old statuses from previous versions of codex. _delete_orphans( LibrarianStatus, "status_type", LibrarianStatus.StatusChoices.values, ) # Create any missing statuses with defaults. for status_type, title in LibrarianStatus.StatusChoices.choices: _, created = LibrarianStatus.objects.get_or_create( defaults=STATUS_DEFAULTS, status_type=status_type ) if created: logger.debug(f"Created {title} LibrarianStatus.") # Reset any statuses left in a non-default state (jobs interrupted by # shutdown) without touching statuses already at defaults. # This ensures that updated_at is preserved across restarts. non_default_filter = ( Q(preactive__isnull=False) | Q(active__isnull=False) | Q(complete__isnull=False) | Q(total__isnull=False) | ~Q(subtitle="") ) if count := LibrarianStatus.objects.filter(non_default_filter).update( **STATUS_DEFAULTS, updated_at=Now() ): logger.debug(f"Reset {count} interrupted LibrarianStatuses to defaults.") def init_libraries() -> None: """Reset libraries that were mid-update when the server stopped.""" lib_count = Library.objects.filter(update_in_progress=True).update( update_in_progress=False, updated_at=Now() ) if lib_count: logger.debug(f"Reset {lib_count} Libraries' update_in_progress flag.") def init_custom_cover_dir() -> None: """Initialize the Custom Cover Dir singleton row.""" defaults = dict(**Library.CUSTOM_COVERS_DIR_DEFAULTS, path=CUSTOM_COVERS_DIR) covers_library, created = Library.objects.get_or_create( defaults=defaults, covers_only=True ) if created: logger.info("Created Custom Covers Dir settings in the db.") old_path = covers_library.path if Path(old_path) != CUSTOM_COVERS_DIR: Library.objects.filter(covers_only=True).update(path=str(CUSTOM_COVERS_DIR)) logger.info( f"Updated Custom Group Covers Dir path from {old_path} to {CUSTOM_COVERS_DIR}." 
) def update_custom_covers_for_config_dir() -> None: """Update custom covers if the config dir changes.""" # This is okay, but I wouldn't need to do it if paths were constructed from # parent_folder and library.path # Fast lookup without relations seems better though, paths shouldn't change too much. # Determine which covers need re-pathing update_covers = [] delete_cover_pks = [] update_fields = ("path", "updated_at") group_covers = ( CustomCover.objects.filter(library__covers_only=True) .exclude(path__startswith=F("library__path")) .only(*update_fields) ) logger.debug(f"Checking that group custom covers are under {CUSTOM_COVERS_DIR}") for cover in group_covers.iterator(): old_path = cover.path parts = old_path.rsplit(f"/{CUSTOM_COVERS_SUBDIR}/") if len(parts) < 2: # noqa: PLR2004 delete_cover_pks.append(cover.pk) continue new_path = CUSTOM_COVERS_DIR / parts[1] if new_path.exists(): cover.path = str(new_path) update_covers.append(cover) else: delete_cover_pks.append(cover.pk) update_count = len(update_covers) logger.debug( f"Found {update_count} custom covers to update, {len(delete_cover_pks)} to delete." ) # Update covers if update_count: CustomCover.objects.bulk_update(update_covers, update_fields) logger.info( f"Updated {update_count} CustomCovers sources to point to new config dir" ) # Delete covers we can't reliably update. if delete_cover_pks: delete_qs = CustomCover.objects.filter(pk__in=delete_cover_pks) delete_count, _ = delete_qs.delete() logger.warning( f"Delete {delete_count} CustomCovers that could not be re-sourced after config dir change." ) def create_missing_auth_tokens() -> None: """Create missing auth tokens.""" num_created = 0 for user in User.objects.all(): _, created = Token.objects.get_or_create(user=user) if created: num_created += 1 logger.info(f"Created {num_created} missing auth tokens for users.") def ensure_db_rows() -> None: """Ensure database content is good.""" ensure_superuser() init_admin_flags() init_timestamps() init_librarian_statuses() init_libraries() init_custom_cover_dir() update_custom_covers_for_config_dir() create_missing_auth_tokens() def codex_init() -> bool: """Initialize the database and start the daemons.""" if not ensure_db_schema(): return False ensure_db_rows() patch_registration_setting() cache.clear() if GRANIAN_URL_PATH_PREFIX: path_prefix_log = ( f"Codex is being served from url path prefix: {GRANIAN_URL_PATH_PREFIX}" ) else: path_prefix_log = "Codex is being served without a url path prefix." logger.info(path_prefix_log) if DEBUG: logger.info(f"Will reload granian if {CODEX_CONFIG_TOML} changes") if AUTH_REMOTE_USER: logger.info("Remote User authorization enabled.") return True ================================================ FILE: codex/startup/custom_cover_libraries.py ================================================ """Database cover integrity checks and remedies.""" from django.apps import apps from codex.settings import ( CUSTOM_COVERS_DIR, ) def _repair_extra_custom_cover_libraries(library_model, log) -> None: """Attempt to remove the bad ones, probably futile.""" delete_libs = library_model.objects.filter(covers_only=True).exclude( path=CUSTOM_COVERS_DIR ) count, _ = delete_libs.delete() if count: log.warning( f"Removed {count} duplicate custom cover libraries pointing to unused custom cover dirs." 
) def cleanup_custom_cover_libraries(log) -> None: """Cleanup extra custom cover libraries.""" try: try: library_model = apps.get_model("codex", "library") except LookupError: log.debug("Library model doesn't exist yet.") return if not library_model or not hasattr(library_model, "covers_only"): log.debug("Library model doesn't support custom cover library yet.") return _repair_extra_custom_cover_libraries(library_model, log) custom_cover_libraries = library_model.objects.filter(covers_only=True) count = custom_cover_libraries.count() if count > 1: count, _ = custom_cover_libraries.delete() if count: log.warning( f"Removed all ({count}) custom cover libraries, Unable to determine valid one. Will recreate upon startup." ) except Exception as exc: log.warning(f"Failed to check custom cover library for integrity - {exc}") ================================================ FILE: codex/startup/db.py ================================================ """Low level database utilities.""" import sqlite3 import subprocess from threading import Event, Lock from django.core.management import call_command from django.db import DEFAULT_DB_ALIAS, connection, connections from django.db.migrations.executor import MigrationExecutor from loguru import logger from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.scribe.janitor.integrity import ( fts_integrity_check, fts_rebuild, integrity_check, ) from codex.librarian.scribe.janitor.integrity.foreign_keys import fix_foreign_keys from codex.librarian.scribe.janitor.janitor import Janitor from codex.settings import ( BACKUP_DB_PATH, CONFIG_PATH, DB_PATH, FIX_FOREIGN_KEYS, FTS_INTEGRITY_CHECK, FTS_REBUILD, INTEGRITY_CHECK, ) from codex.startup.custom_cover_libraries import cleanup_custom_cover_libraries from codex.version import VERSION _REPAIR_FLAG_PATH = CONFIG_PATH / "rebuild_db" _REBUILT_DB_PATH = DB_PATH.parent / (DB_PATH.name + ".rebuilt") _REPAIR_ARGS = ("sqlite3", DB_PATH, ".recover") def _has_unapplied_migrations() -> bool: """Check if any migrations are outstanding.""" try: connection = connections[DEFAULT_DB_ALIAS] connection.prepare_database() executor = MigrationExecutor(connection) targets = [ key for key in executor.loader.graph.leaf_nodes() if key[0] == "codex" ] plan = executor.migration_plan(targets) except Exception as exc: logger.warning(f"has_unapplied_migrations(): {exc}") return False else: return bool(plan) def _get_backup_db_path(prefix): suffix = f".{prefix}{BACKUP_DB_PATH.suffix}" return BACKUP_DB_PATH.with_suffix(suffix) def _backup_db_before_migration() -> None: """If there are migrations to do, backup the db.""" backup_path = _get_backup_db_path(f"before-v{VERSION}") janitor = Janitor(logger, LIBRARIAN_QUEUE, Lock(), event=Event()) janitor.backup_db(show_status=False, backup_path=backup_path) logger.info("Backed up database before migrations") def _repair_db(log) -> None: """Run integrity checks on startup.""" if FIX_FOREIGN_KEYS: fix_foreign_keys(log) if INTEGRITY_CHECK: integrity_check(log, long=True) success = fts_integrity_check(log) if FTS_INTEGRITY_CHECK else True if FTS_REBUILD or not success: fts_rebuild() log.success("Rebuilt FTS virtual table.") cleanup_custom_cover_libraries(log) def _rebuild_db() -> bool: """Dump and rebuild the database.""" # Drastic if not _REPAIR_FLAG_PATH.exists(): return False logger.warning("REBUILDING DATABASE!!") _REBUILT_DB_PATH.unlink(missing_ok=True) recover_proc = subprocess.Popen(_REPAIR_ARGS, stdout=subprocess.PIPE) # noqa: S603 with sqlite3.connect(_REBUILT_DB_PATH) as 
new_db_conn, new_db_conn as new_db_cur: if recover_proc.stdout: for line in recover_proc.stdout: row = line.decode().strip() replaced_row = ( "PRAGMA writable_schema = reset;" if row == "PRAGMA writable_schema = off;" else row ) new_db_cur.execute(replaced_row) if recover_proc.stdout: recover_proc.stdout.close() recover_proc.wait(timeout=15) backup_path = _get_backup_db_path("before-rebuild") DB_PATH.rename(backup_path) logger.info(f"Backed up old db to {backup_path}") _REBUILT_DB_PATH.replace(DB_PATH) _REPAIR_FLAG_PATH.unlink(missing_ok=True) logger.success("Rebuilt database. You may start codex normally now.") return True def ensure_db_schema() -> bool: """Ensure the db is good and up to date.""" logger.info("Ensuring database is correct and up to date...") table_names = connection.introspection.table_names() if db_exists := "django_migrations" in table_names: if _rebuild_db(): return False _repair_db(logger) if db_exists and _has_unapplied_migrations(): _backup_db_before_migration() call_command("migrate") logger.success("Database ready.") return True ================================================ FILE: codex/startup/loguru.py ================================================ """Initialize logging for codex.""" import sys from typing import Any from loguru import logger from codex.settings import DEBUG, LOG_PATH, LOG_RETENTION, LOG_ROTATION, LOGLEVEL def _log_format() -> str: fmt = "{time:YYYY-MM-DD HH:mm:ss} | {level: <8}" if DEBUG: fmt += " | " fmt += "{thread.name}:" fmt += "{name}:{function}:{line}" fmt += "" fmt += " | {message}" # fmt += "\n{exception}" only for format as a callable return fmt def loguru_init() -> None: """Initialize loguru sinks.""" logger.level("DEBUG", color="") logger.level("INFO", color="") logger.level("SUCCESS", color="") log_format = _log_format() kwargs: dict[str, Any] = { "backtrace": True, "catch": True, "enqueue": True, "format": log_format, "level": LOGLEVEL, } logger.remove() # Default "sys.stderr" sink is not picklable logger.add(sys.stdout, **kwargs, colorize=True) logger.add( LOG_PATH, **kwargs, rotation=LOG_ROTATION, retention=LOG_RETENTION, compression="xz", ) ================================================ FILE: codex/startup/registration.py ================================================ """Patch settings at runtime.""" from rest_registration.settings import registration_settings from codex.choices.admin import AdminFlagChoices from codex.models.admin import AdminFlag def patch_registration_setting() -> None: """Patch rest_registration setting.""" # Technically this is a no-no, but rest-registration makes it easy. enr = AdminFlag.objects.only("on").get(key=AdminFlagChoices.REGISTRATION.value).on registration_settings.user_settings["REGISTER_FLOW_ENABLED"] = enr ================================================ FILE: codex/static_src/img/.picopt_treestamps.yaml ================================================ .: 1776102799.208395 config: bigger: false convert_to: - WEBP formats: - GIF - JPEG - PNG - SVG - WEBP ignore: [] keep_metadata: true near_lossless: false recurse: true symlinks: true treestamps_config: ignore: !!set {} symlinks: true ================================================ FILE: codex/static_src/pwa/offline.html ================================================ Codex is offline

Can't connect to the Codex server

Check your network connectivity ================================================ FILE: codex/static_src/robots.txt ================================================ User-agent: * Disallow: ================================================ FILE: codex/templates/README.md ================================================ # templates Django Templates for serving the codex app index and special generated files. ================================================ FILE: codex/templates/headers-icons.html ================================================ {% load static %} ================================================ FILE: codex/templates/headers-script-globals.html ================================================ {% load static %} ================================================ FILE: codex/templates/index.html ================================================ {% load django_vite %} {{ title }} {% include "headers-icons.html" %} {% include "pwa/headers.html" %} {% include "headers-script-globals.html" %} {% vite_hmr_client nonce=csp_nonce %} {% vite_asset "src/main.js" nonce=csp_nonce %}
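The templates above resolve their asset and route references through Django's staticfiles storage and URL reversing. A minimal sketch of what those tags expand to, runnable from a configured Django shell (output paths are illustrative, not confirmed from the source):

# What {% static %} and {% url %} resolve to, outside a template.
# Assumes Django settings are configured, e.g. via `bin/manage.py shell`.
from django.templatetags.static import static
from django.urls import reverse

print(static("img/logo.svg"))  # e.g. "/static/img/logo.svg"
print(reverse("app:start"))  # e.g. "/" (the route named in codex.urls.app)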
================================================ FILE: codex/templates/opds_v1/index.xml ================================================ {% load static %} {% load tz %} {{ id_tag }} {% static 'img/logo-32.webp' %} {{ title }} Codex {{ updated }} {% if items_per_page %} {{ items_per_page }} {% endif %} {% if total_results %} {{ total_results }} {% endif %} {% for link in links %} {% endfor %} {% for entry in entries %} {{ entry.id_tag }} {{ entry.title }} {% for link in entry.links %} {% endfor %} {% if entry.issued %} {{ entry.issued }} {% endif %} {% if entry.published %} {{ entry.published }} {% endif %} {% if entry.updated %} {{ entry.updated }} {% endif %} {% if entry.publisher %} {{ entry.publisher }} {% endif %} {% if entry.language and entry.language.name %} {{ entry.language.name }} {% endif %} {% if entry.summary %} {# pocketbooks supports content but not summary #} {{ entry.summary }} {% endif %} {% for author in entry.authors %} {{ author.name }} {% if author.url %} {{ author.url }} {% endif %} {% endfor %} {% for cont in entry.contributors %} {{ cont.name }} {% if cont.url %} {{ cont.url }} {% endif %} {% endfor %} {% for term_key, categories in entry.category_groups.items %} {% for category in categories %} {% endfor %} {% endfor %} {% endfor %} ================================================ FILE: codex/templates/opds_v1/opensearch_v1.xml ================================================ {% load static %} Codex Codex OPDS Search UTF-8 UTF-8 {% static 'img/logo.svg' %} ================================================ FILE: codex/templates/pwa/headers.html ================================================ {% load static %} {# Apple as of iOS 26 still can't be an svg #} {# Chrome #} ================================================ FILE: codex/templates/pwa/manifest.webmanifest ================================================ { {% load static %} "name": "Codex", "short_name": "Codex", "description": "Comic book reader", "scope": "{% url 'app:start' %}", "start_url" : "{% url 'app:start' %}", "icons": [ { "src": "{% static 'img/logo.svg' %}", "sizes": "any 192x192 512x512", "type": "image/svg+xml", "purpose": "any" }, { "src": "{% static 'img/logo-maskable.svg' %}", "sizes": "any 192x192 512x512", "type": "image/svg+xml", "purpose": "maskable" } ], "display": "standalone", "lang": "en-US", "background_color": "#2A2A2A", "theme_color": "#cc7b19", "dir": "auto", "orientation": "any", "status_bar": "default" } ================================================ FILE: codex/templates/pwa/serviceworker-register.js ================================================ // Initialize the service worker if ("serviceWorker" in navigator) { navigator.serviceWorker .register("{% url 'pwa:serviceworker' %}", { scope: "{% url 'app:start' %}", }) .then(function (registration) { // Registration was successful console.debug( "codex-pwa: ServiceWorker registration successful with scope:", registration.scope, ); return true; }) .catch(function (error) { // registration failed :( console.warn("codex-pwa: ServiceWorker registration failed:", error); }); } ================================================ FILE: codex/templates/pwa/serviceworker.js ================================================ // {% load static %} const CACHE_PREFIX = "codex-pwa-v"; const STATIC_CACHE_NAME = CACHE_PREFIX + new Date().getSeconds(); const OFFLINE_PATH = "{% static 'pwa/offline.html' %}"; const FILES_TO_CACHE = [ OFFLINE_PATH, "{% static 'img/logo-maskable-180.webp' %}", "{% static 'img/logo-maskable.svg' %}", "{% static 
'img/logo.svg' %}", ]; // Cache offline page on install self.addEventListener("install", (event) => { this.skipWaiting(); event.waitUntil( caches.open(STATIC_CACHE_NAME).then((cache) => { return cache.addAll(FILES_TO_CACHE); }), ); }); // Clear old caches on activate self.addEventListener("activate", (event) => { event.waitUntil( caches.keys().then((cacheNames) => { return Promise.all( cacheNames .filter((cacheName) => cacheName.startsWith(CACHE_PREFIX)) .filter((cacheName) => cacheName !== STATIC_CACHE_NAME) .map((cacheName) => caches.delete(cacheName)), ); }), ); }); // Serve from Cache self.addEventListener("fetch", (event) => { event.respondWith( caches .match(event.request) .then((response) => { return response || fetch(event.request); }) .catch(() => { return caches.match(OFFLINE_PATH); }), ); }); ================================================ FILE: codex/urls/__init__.py ================================================ """Django URLS.""" ================================================ FILE: codex/urls/api/__init__.py ================================================ """API urls.""" ================================================ FILE: codex/urls/api/admin.py ================================================ """codex:api:v3:admin URL Configuration.""" from django.urls import path from django.views.decorators.cache import never_cache from codex.views.admin.api_key import AdminAPIKey from codex.views.admin.flag import AdminFlagViewSet from codex.views.admin.group import AdminGroupViewSet from codex.views.admin.library import ( AdminFailedImportViewSet, AdminFolderListView, AdminLibraryViewSet, ) from codex.views.admin.stats import AdminStatsView from codex.views.admin.tasks import ( AdminLibrarianStatusActiveViewSet, AdminLibrarianStatusAllViewSet, AdminLibrarianTaskView, ) from codex.views.admin.user import AdminUserChangePasswordView, AdminUserViewSet READ = {"get": "list"} RETRIEVE = {"get": "retrieve"} CREATE = {"post": "create"} UPDATE = {"put": "partial_update"} DELETE = {"delete": "destroy"} app_name = "admin" urlpatterns = [ path( "user", AdminUserViewSet.as_view({**CREATE, **READ}), name="user_read", ), path( "user//", AdminUserViewSet.as_view({**UPDATE, **DELETE}), name="user_update_delete", ), path( "user//password", AdminUserChangePasswordView.as_view(), name="user_password_update", ), path("group", AdminGroupViewSet.as_view({**CREATE, **READ}), name="group"), path( "group//", AdminGroupViewSet.as_view({**UPDATE, **DELETE}), name="group_update", ), path("flag", AdminFlagViewSet.as_view({**READ}), name="flag"), path( "flag//", AdminFlagViewSet.as_view({**RETRIEVE, **UPDATE}), name="one_flag", ), path( "library", AdminLibraryViewSet.as_view({**CREATE, **READ}), name="library", ), path( "library//", AdminLibraryViewSet.as_view({**UPDATE, **DELETE}), name="library", ), path("folders", AdminFolderListView.as_view(), name="folders"), path( "failed-import", AdminFailedImportViewSet.as_view({**READ}), name="failed_import", ), path( "librarian/status", never_cache(AdminLibrarianStatusActiveViewSet.as_view({**READ})), name="librarian_status", ), path( "librarian/status/all", never_cache(AdminLibrarianStatusAllViewSet.as_view({**READ})), name="librarian_status_all", ), path("librarian/task", AdminLibrarianTaskView.as_view(), name="librarian_task"), path("stats", AdminStatsView.as_view(), name="stats"), path("api_key", AdminAPIKey.as_view(), name="api_key"), ] ================================================ FILE: codex/urls/api/auth.py 
================================================ """codex:api:v3:auth URL Configuration.""" from django.urls import include, path from codex.views.auth import AuthToken from codex.views.public import AdminFlagsView from codex.views.timezone import TimezoneView app_name = "auth" urlpatterns = [ path("", include("rest_registration.api.urls")), path("flags/", AdminFlagsView.as_view(), name="flags"), path("token/", AuthToken.as_view(), name="token"), path("timezone/", TimezoneView.as_view(), name="timezone"), ] ================================================ FILE: codex/urls/api/browser.py ================================================ """codex:api:v3:browser URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_control, cache_page, never_cache from codex.urls.const import BROWSER_TIMEOUT, COVER_MAX_AGE, PAGE_MAX_AGE from codex.views.browser.bookmark import BookmarkView from codex.views.browser.browser import BrowserView from codex.views.browser.choices import BrowserChoicesAvailableView, BrowserChoicesView from codex.views.browser.cover import CoverView from codex.views.browser.download import GroupDownloadView from codex.views.browser.metadata import MetadataView from codex.views.browser.saved_settings import ( SavedBrowserSettingsListView, SavedBrowserSettingsLoadView, ) from codex.views.browser.settings import BrowserSettingsView from codex.views.lazy_import import LazyImportView METADATA_TIMEOUT = PAGE_MAX_AGE app_name = "browser" urlpatterns = [ # # # Browser path( "/", cache_page(BROWSER_TIMEOUT)(BrowserView.as_view()), name="page", ), path( "/choices/", cache_page(BROWSER_TIMEOUT)(BrowserChoicesView.as_view()), name="choices_field", ), path( "/choices_available", cache_page(BROWSER_TIMEOUT)(BrowserChoicesAvailableView.as_view()), name="choices_available", ), path( "/metadata", cache_page(METADATA_TIMEOUT)(MetadataView.as_view()), name="metadata", ), # # # Bookmark path( "/bookmark", BookmarkView.as_view(), name="bookmark", ), path("settings", never_cache(BrowserSettingsView.as_view()), name="settings"), # # # Saved Settings path( "settings/saved", never_cache(SavedBrowserSettingsListView.as_view()), name="saved_settings_list", ), path( "settings/saved/", never_cache(SavedBrowserSettingsLoadView.as_view()), name="saved_settings_detail", ), # # # Cover path( "/cover.webp", cache_control(max_age=COVER_MAX_AGE, public=True)(CoverView.as_view()), name="cover", ), # # # Download path( "/download/", GroupDownloadView.as_view(), name="download", ), # # # Lazy Import path( "/import", LazyImportView.as_view(), name="import", ), ] ================================================ FILE: codex/urls/api/reader.py ================================================ """codex:api:v3:reader URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_control from codex.urls.const import PAGE_MAX_AGE from codex.views.download import DownloadView from codex.views.reader.page import ReaderPageView from codex.views.reader.reader import ReaderView from codex.views.reader.settings import ReaderSettingsView app_name = "issue" urlpatterns = [ # # # Reader path("", ReaderView.as_view(), name="reader"), path( "//page.jpg", cache_control(max_age=PAGE_MAX_AGE, public=True)(ReaderPageView.as_view()), name="page", ), path("settings", ReaderSettingsView.as_view(), name="settings"), path( "/settings", ReaderSettingsView.as_view(), name="comic_settings", ), # # # Download path( "/download/", DownloadView.as_view(), name="download", ), ] 
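These URL confs repeatedly wrap class-based views in cache decorators at routing time. A minimal sketch of that pattern with a hypothetical view, to show the difference between the two decorators used above: cache_page stores whole responses in the server-side cache for the timeout, while cache_control only emits Cache-Control headers for browsers and proxies.

from django.urls import path
from django.views.decorators.cache import cache_control, cache_page
from django.views.generic import TemplateView


class ExampleView(TemplateView):
    # Hypothetical stand-in for a codex view.
    template_name = "index.html"


urlpatterns = [
    # Server-side response cache, as with BROWSER_TIMEOUT above.
    path("cached/", cache_page(60 * 5)(ExampleView.as_view()), name="cached"),
    # Client/proxy caching headers only, as with PAGE_MAX_AGE above.
    path(
        "pages/",
        cache_control(max_age=60 * 60 * 24 * 7, public=True)(ExampleView.as_view()),
        name="pages",
    ),
]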
================================================ FILE: codex/urls/api/root.py ================================================ """codex:api URL Configuration.""" from django.urls import include, path app_name = "api" urlpatterns = [ path("v3/", include("codex.urls.api.v3")), ] ================================================ FILE: codex/urls/api/v3.py ================================================ """codex:api:v3 URL Configuration.""" from django.urls import include, path from drf_spectacular.views import ( SpectacularAPIView, SpectacularSwaggerSplitView, ) from codex.views.browser.mtime import MtimeView from codex.views.opds.urls import OPDSURLsView from codex.views.version import VersionView app_name = "v3" urlpatterns = [ path("auth/", include("codex.urls.api.auth")), # reader must come first to occlude browser group path("c/", include("codex.urls.api.reader")), path("/", include("codex.urls.api.browser")), path("mtime", MtimeView.as_view(), name="mtimes"), path("version", VersionView.as_view(), name="version"), path("admin/", include("codex.urls.api.admin")), path("schema", SpectacularAPIView.as_view(), name="schema"), path("opds-urls", OPDSURLsView.as_view(), name="opds_urls"), path( "", SpectacularSwaggerSplitView.as_view(url_name="api:v3:schema"), name="base", ), ] ================================================ FILE: codex/urls/app.py ================================================ """codex:app URL Configuration.""" from django.urls import path, re_path from django.views.decorators.cache import cache_control from django.views.generic import RedirectView from codex.views.download import FileView from codex.views.frontend import IndexView app_name = "app" BOOK_AGE = 60 * 60 * 24 * 7 urlpatterns = [ path("//", IndexView.as_view(), name="route"), path( "c//book.pdf", cache_control(max_age=BOOK_AGE)(FileView.as_view()), name="pdf", ), path("admin/", IndexView.as_view(), name="admin"), path("error/", IndexView.as_view(), name="error"), path("", IndexView.as_view(), name="start"), re_path( ".*", RedirectView.as_view(pattern_name="app:start", permanent=False), name="catchall", ), ] ================================================ FILE: codex/urls/const.py ================================================ """Timeouts and max ages.""" COMMON_TIMEOUT = 60 * 60 BROWSER_TIMEOUT = 60 * 5 COVER_MAX_AGE = 60 * 60 * 24 * 7 PAGE_MAX_AGE = COVER_MAX_AGE OPDS_TIMEOUT = 0 # BROWSER_TIMEOUT ================================================ FILE: codex/urls/converters.py ================================================ """Custom url converters.""" from django.urls.converters import StringConverter from loguru import logger class GroupConverter(StringConverter): """Only accept valid browser groups.""" regex = r"[rpisvcfa]" class IntListConverter: """Integer list converter.""" regex = r"\d+(,\d+)*" DELIMITER = "," def to_python(self, value) -> tuple: """Convert string list to tuple of ints.""" parts = value.split(self.DELIMITER) pks = set() for part in parts: try: pk = int(part) if pk == 0: pks = set() break pks.add(pk) except ValueError: reason = f"Bad pk list submitted to IntListConverter {part=} in {value=}" logger.warning(reason) return tuple(sorted(pks)) def to_url(self, value) -> str: """Convert sequence of ints to a comma-delimited string list.""" pks: set[str] = set() if value: for pk in sorted(value): if pk == 0: pks = set() break pks.add(str(pk)) return self.DELIMITER.join(pks) if pks else "0" ================================================ FILE: codex/urls/opds/__init__.py
================================================ """OPDS urls.""" ================================================ FILE: codex/urls/opds/authentication.py ================================================ """codex:opds:v1 URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_page from codex.urls.const import COMMON_TIMEOUT from codex.views.opds.authentication.v1 import OPDSAuthentication1View app_name = "auth" urlpatterns = [ path( "v1", cache_page(COMMON_TIMEOUT)(OPDSAuthentication1View.as_view()), name="v1", ), ] ================================================ FILE: codex/urls/opds/binary.py ================================================ """codex:opds:v1 URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_control from codex.urls.const import COVER_MAX_AGE, PAGE_MAX_AGE from codex.views.opds.binary import OPDSCoverView, OPDSDownloadView, OPDSPageView app_name = "bin" urlpatterns = [ # # Reader path( "c///page.jpg", cache_control(max_age=PAGE_MAX_AGE, public=True)(OPDSPageView.as_view()), name="page", ), # # utilities path( "//cover.webp", cache_control(max_age=COVER_MAX_AGE, public=True)(OPDSCoverView.as_view()), name="cover", ), path( "c//download/", OPDSDownloadView.as_view(), name="download", ), ] ================================================ FILE: codex/urls/opds/root.py ================================================ """codex:opds URL Configuration.""" from django.urls import include, path, re_path from django.views.generic.base import RedirectView from codex.views.opds.v2.feed import OPDS2StartView app_name = "opds" urlpatterns = ( path( "auth/", include("codex.urls.opds.authentication"), name="auth", ), path("bin/", include("codex.urls.opds.binary")), path("v1.2/", include("codex.urls.opds.v1")), path( "v2.0", OPDS2StartView.as_view(), {"group": "r", "pks": (0,), "page": 1}, name="start", ), path("v2.0/", include("codex.urls.opds.v2")), re_path(r"auth.*", RedirectView.as_view(pattern_name="opds:auth:v1")), re_path(r"v?1[\.\d]*", RedirectView.as_view(pattern_name="opds:v1:start")), re_path(r"v?2[\.\d]*", RedirectView.as_view(pattern_name="opds:v2:start")), path("", RedirectView.as_view(pattern_name="opds:v1:start")), ) ================================================ FILE: codex/urls/opds/v1.py ================================================ """codex:opds:v1 URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_page from codex.urls.const import OPDS_TIMEOUT from codex.views.opds.opensearch.v1 import OpenSearch1View from codex.views.opds.v1.feed import OPDS1FeedView, OPDS1StartView app_name = "v1" urlpatterns = [ # # Browser path( "//", cache_page(OPDS_TIMEOUT)(OPDS1FeedView.as_view()), name="feed", ), path( "opensearch/v1.1", cache_page(OPDS_TIMEOUT)(OpenSearch1View.as_view()), name="opensearch_v1", ), # Start path( "", cache_page(OPDS_TIMEOUT)(OPDS1StartView.as_view()), {"group": "r", "pks": (0,), "page": 1}, name="start", ), ] ================================================ FILE: codex/urls/opds/v2.py ================================================ """codex:opds:v2 URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_page from django.views.generic import RedirectView from codex.urls.const import OPDS_TIMEOUT from codex.views.opds.v2.feed import OPDS2FeedView, OPDS2StartView from codex.views.opds.v2.manifest import OPDS2ManifestView from codex.views.opds.v2.progression import
OPDS2ProgressionView app_name = "v2" urlpatterns = [ # # Browser path( "c//1", cache_page(OPDS_TIMEOUT)(OPDS2ManifestView.as_view()), {"group": "c", "page": 1}, name="manifest", ), path( "//position", cache_page(OPDS_TIMEOUT)(OPDS2ProgressionView.as_view()), name="position", ), path( "//", cache_page(OPDS_TIMEOUT)(OPDS2FeedView.as_view()), name="feed", ), path( "", cache_page(OPDS_TIMEOUT)(OPDS2StartView.as_view()), {"group": "r", "pks": (0,), "page": 1}, name="start", ), path("catalog", RedirectView.as_view(pattern_name="opds:v2:start")), ] ================================================ FILE: codex/urls/pwa.py ================================================ """codex:pwa URL Configuration.""" from django.urls import path from django.views.decorators.cache import cache_page from codex.urls.const import COMMON_TIMEOUT from codex.views.pwa import ( ServiceWorkerRegisterView, ServiceWorkerView, WebManifestView, ) app_name = "pwa" urlpatterns = [ path( "manifest.webmanifest", cache_page(COMMON_TIMEOUT)(WebManifestView.as_view()), name="manifest", ), path( "serviceworker-register.js", cache_page(COMMON_TIMEOUT)(ServiceWorkerRegisterView.as_view()), name="serviceworker_register", ), path( "serviceworker.js", cache_page(COMMON_TIMEOUT)(ServiceWorkerView.as_view()), name="serviceworker", ), ] ================================================ FILE: codex/urls/root.py ================================================ """ Codex URL Configuration. https://docs.djangoproject.com/en/dev/topics/http/urls/ """ from django.contrib.staticfiles.storage import staticfiles_storage from django.urls import include, path, register_converter from django.views.generic.base import RedirectView from codex.settings import DEBUG from codex.urls.converters import GroupConverter, IntListConverter from codex.views.healthcheck import health_check_view register_converter(GroupConverter, "group") register_converter(IntListConverter, "int_list") urlpatterns = [] if DEBUG: # Pyright doesn't follow this logic, so it will try to find these types.
from schema_graph.views import Schema urlpatterns += [ path("schema/", Schema.as_view()), ] urlpatterns += [ path( "favicon.ico", RedirectView.as_view(url=staticfiles_storage.url("img/logo.svg")), name="favicon", ), path( "robots.txt", RedirectView.as_view(url=staticfiles_storage.url("robots.txt")), name="robots", ), path("api/", include("codex.urls.api.root")), path("opds/", include("codex.urls.opds.root")), path("opds", RedirectView.as_view(pattern_name="opds:v1:start")), path("health", health_check_view, name="healthcheck"), path("", include("codex.urls.pwa")), # The app must be last because it includes a catch-all path path("", include("codex.urls.app")), ] ================================================ FILE: codex/urls/spectacular.py ================================================ """Spectacular hooks.""" ALLOW_PREFIXES = ("/api", "/opds") def allow_list(endpoints) -> list: """Allow only API endpoints.""" drf_endpoints = [] for endpoint in endpoints: path = endpoint[0] for prefix in ALLOW_PREFIXES: if path.startswith(prefix): drf_endpoints += [endpoint] break return drf_endpoints ================================================ FILE: codex/util.py ================================================ """Utility functions.""" from collections.abc import Mapping def max_none(*args): """None aware math.max.""" return max(filter(lambda x: x is not None, args), default=None) def mapping_to_dict(data) -> dict | set | frozenset | tuple | list: """Convert nested Mapping objects to dicts.""" if isinstance(data, Mapping): return {key: mapping_to_dict(value) for key, value in data.items()} if isinstance(data, list | tuple | frozenset | set): return type(data)(mapping_to_dict(item) for item in data) return data def flatten(seq: tuple | list | frozenset | set): """Flatten sequence.""" flattened = [] for item in seq: if isinstance(item, tuple | list | set | frozenset): # To make recursive, instead of list could call flatten again flattened.extend(list(item)) else: flattened.append(item) return seq.__class__(tuple(flattened)) ================================================ FILE: codex/version.py ================================================ """Hold the current codex version.""" from importlib.metadata import PackageNotFoundError, version PACKAGE_NAME = "codex" def get_version() -> str: """Get the current installed codex version.""" try: v = version(PACKAGE_NAME) except PackageNotFoundError: v = "test" return v VERSION = get_version() ================================================ FILE: codex/views/README.md ================================================ # views The Django Rest Framework views. [API Docs](https://www.django-rest-framework.org/api-guide/views/). 
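The codex.util helpers above are small enough to demonstrate end to end. A usage sketch (values illustrative) showing the None-aware max, the one-level flatten, and the recursive mapping conversion:

from types import MappingProxyType

from codex.util import flatten, mapping_to_dict, max_none

# max_none ignores None instead of raising TypeError like bare max().
assert max_none(None, 3, 1, None) == 3
assert max_none(None, None) is None

# flatten unnests one level and preserves the outer sequence type.
assert flatten((1, (2, 3), [4])) == (1, 2, 3, 4)

# mapping_to_dict recursively converts Mapping objects to plain dicts.
proxy = MappingProxyType({"a": MappingProxyType({"b": 1})})
assert mapping_to_dict(proxy) == {"a": {"b": 1}}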
================================================ FILE: codex/views/__init__.py ================================================ """Django Rest Framework views.""" ================================================ FILE: codex/views/admin/__init__.py ================================================ """Admin Views.""" ================================================ FILE: codex/views/admin/api_key.py ================================================ """API Key Endpoint.""" from drf_spectacular.utils import extend_schema from rest_framework.response import Response from codex.models import Timestamp from codex.serializers.admin.stats import APIKeySerializer from codex.views.admin.auth import AdminGenericAPIView class AdminAPIKey(AdminGenericAPIView): """Regenerate API Key.""" serializer_class = APIKeySerializer input_serializer_class = None @extend_schema(request=input_serializer_class) def put(self, *_args, **_kwargs) -> Response: """Regenerate the API Key.""" ts = Timestamp.objects.get(key=Timestamp.Choices.API_KEY.value) ts.save_uuid_version() serializer = self.get_serializer(ts) return Response(serializer.data) ================================================ FILE: codex/views/admin/auth.py ================================================ """Admin Auth.""" from rest_framework.generics import GenericAPIView from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet class AdminAuthMixin: """Admin Authorization Classes.""" permission_classes = (IsAdminUser,) class AdminAPIView(AdminAuthMixin, APIView): """Admin API View.""" class AdminGenericAPIView(AdminAuthMixin, GenericAPIView): """Admin Generic API View.""" class AdminModelViewSet(AdminAuthMixin, ModelViewSet): """Admin Model View Set.""" class AdminReadOnlyModelViewSet(AdminAuthMixin, ReadOnlyModelViewSet): """Admin Read Only Model View Set.""" ================================================ FILE: codex/views/admin/flag.py ================================================ """Admin Flag View.""" from typing import override from codex.choices.admin import AdminFlagChoices from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import ADMIN_FLAGS_CHANGED_TASK from codex.librarian.tasks import WakeCronTask from codex.models import AdminFlag from codex.serializers.admin.flags import AdminFlagSerializer from codex.startup.registration import patch_registration_setting from codex.views.admin.auth import AdminModelViewSet _REFRESH_LIBRARY_FLAGS = frozenset( flag.value for flag in ( AdminFlagChoices.FOLDER_VIEW, AdminFlagChoices.NON_USERS, AdminFlagChoices.BANNER_TEXT, ) ) class AdminFlagViewSet(AdminModelViewSet): """Admin Flag Viewset.""" queryset = AdminFlag.objects.all() serializer_class = AdminFlagSerializer lookup_field = "key" def _on_change(self) -> None: """Signal the UI that it's out of date.""" key = self.kwargs.get("key") if key == AdminFlagChoices.REGISTRATION.value: patch_registration_setting() elif key == AdminFlagChoices.SEND_TELEMETRY.value: LIBRARIAN_QUEUE.put(WakeCronTask()) # Heavy-handed: refresh everything, but simple.
# Folder View could only change the group view and let the ui decide # Registration only needs to change the enable flag if key in _REFRESH_LIBRARY_FLAGS: LIBRARIAN_QUEUE.put(ADMIN_FLAGS_CHANGED_TASK) @override def perform_update(self, serializer) -> None: """Perform update and hook for change.""" super().perform_update(serializer) self._on_change() ================================================ FILE: codex/views/admin/group.py ================================================ """Group View.""" from typing import override from django.contrib.auth.models import Group from django.core.cache import cache from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import GROUPS_CHANGED_TASK from codex.serializers.admin.groups import GroupSerializer from codex.views.admin.auth import AdminModelViewSet class AdminGroupViewSet(AdminModelViewSet): """Admin Group Viewset.""" queryset = Group.objects.prefetch_related("user_set", "library_set").select_related( "groupauth" ) serializer_class = GroupSerializer _CHANGE_FIELDS = frozenset({"librarySet", "userSet", "groupauth"}) def _on_change(self, validated_data=None) -> None: """On change hook.""" if not validated_data or frozenset(validated_data.keys()).intersection( self._CHANGE_FIELDS ): cache.clear() LIBRARIAN_QUEUE.put(GROUPS_CHANGED_TASK) @override def get_serializer(self, *args, **kwargs): """Allow creation with the model serializer without users & libraries.""" kwargs["partial"] = True return super().get_serializer(*args, **kwargs) @override def perform_update(self, serializer) -> None: """Perform update and run hooks.""" validated_data = serializer.validated_data super().perform_update(serializer) self._on_change(validated_data) @override def perform_create(self, serializer) -> None: """Perform create and run hooks.""" validated_data = serializer.validated_data super().perform_create(serializer) self._on_change(validated_data) @override def perform_destroy(self, instance) -> None: """Perform destroy and run hooks.""" super().perform_destroy(instance) self._on_change() ================================================ FILE: codex/views/admin/library.py ================================================ """Admin Library Views.""" from pathlib import Path from typing import override from django.core.cache import cache from django.db.models import Case, Subquery, When from django.db.models.aggregates import Count from django.db.models.expressions import Value from django.db.models.functions import Coalesce from django.db.utils import NotSupportedError from drf_spectacular.utils import extend_schema from loguru import logger from rest_framework.exceptions import ValidationError from rest_framework.response import Response from codex.librarian.fs.poller.tasks import FSPollLibrariesTask from codex.librarian.fs.watcher.tasks import FSWatcherRestartTask from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import LIBRARY_CHANGED_TASK from codex.models import CustomCover, FailedImport, Folder, Library from codex.serializers.admin.libraries import ( AdminFolderListSerializer, AdminFolderSerializer, FailedImportSerializer, LibrarySerializer, ) from codex.views.admin.auth import AdminGenericAPIView, AdminModelViewSet _CUSTOM_COVER_COUNT = Coalesce( Subquery( CustomCover.objects.exclude(group="f") .values("group") # A dummy group-by to allow the annotation .annotate(cnt=Count("pk")) .values("cnt")[:1] ), Value(0), ) class AdminLibraryViewSet(AdminModelViewSet): """Admin Library Viewset.""" 
_WATCHER_SYNC_FIELDS = frozenset({"events", "poll", "pollEvery"}) serializer_class = LibrarySerializer queryset = ( Library.objects.prefetch_related("groups") .annotate( comic_count=Case( # When covers_only is True, use the subquery result. # Coalesce ensures we get 0 instead of NULL if CustomCover is empty. When(covers_only=True, then=_CUSTOM_COVER_COUNT), # Otherwise, use the standard count. default=Count("comic", distinct=True), ), failed_count=Count("failedimport", distinct=True), ) .defer("update_in_progress", "created_at", "updated_at") ) @classmethod def _sync_watcher(cls, validated_keys=None) -> None: if validated_keys is None or validated_keys.intersection( cls._WATCHER_SYNC_FIELDS ): task = FSWatcherRestartTask() LIBRARIAN_QUEUE.put(task) @staticmethod def _on_change() -> None: cache.clear() task = LIBRARY_CHANGED_TASK LIBRARIAN_QUEUE.put(task) def _create_library_folder(self, library) -> None: folder = Folder( library=library, path=library.path, name=Path(library.path).name ) folder.save() @staticmethod def _poll(pk, force) -> None: task = FSPollLibrariesTask(frozenset({pk}), force) LIBRARIAN_QUEUE.put(task) @override def perform_create(self, serializer) -> None: """Perform create and run hooks.""" super().perform_create(serializer) if serializer.validated_data.get("covers_only"): raise NotSupportedError library = Library.objects.only("pk", "path").get( path=serializer.validated_data["path"] ) self._create_library_folder(library) self._sync_watcher() self._poll(library.pk, force=False) @override def perform_update(self, serializer) -> None: """Perform update and run hooks.""" validated_keys = frozenset(serializer.validated_data.keys()) pk = self.kwargs["pk"] library = Library.objects.get(pk=pk) if library.covers_only and serializer.validated_data.get("path"): raise NotSupportedError super().perform_update(serializer) if "groupSet" in validated_keys: self._on_change() self._sync_watcher(validated_keys) self._poll(pk, force=False) @override def perform_destroy(self, instance) -> None: """Perform destroy and run hooks.""" if instance.covers_only: raise NotSupportedError super().perform_destroy(instance) self._sync_watcher() self._on_change() class AdminFailedImportViewSet(AdminModelViewSet): """Admin FailedImport Viewset.""" queryset = FailedImport.objects.defer("updated_at") serializer_class = FailedImportSerializer class AdminFolderListView(AdminGenericAPIView): """List server directories.""" serializer_class = AdminFolderListSerializer input_serializer_class = AdminFolderSerializer @staticmethod def _get_dirs(root_path, show_hidden) -> tuple: """Get dirs list.""" dirs = [] if root_path.parent != root_path: dirs += [".."] subdirs = [] for subpath in root_path.iterdir(): if subpath.name.startswith(".") and not show_hidden: continue if subpath.resolve().is_dir(): subdirs.append(subpath.name) dirs += sorted(subdirs) return tuple(dirs) @extend_schema(request=input_serializer_class) def get(self, *_args, **_kwargs) -> Response: """Get subdirectories for a path.""" try: serializer = self.input_serializer_class(data=self.request.GET) serializer.is_valid(raise_exception=True) root_path = Path(serializer.validated_data.get("path", ".")).resolve() show_hidden = serializer.validated_data.get("show_hidden", False) dirs = self._get_dirs(root_path, show_hidden) data = {"root_folder": str(root_path), "folders": dirs} serializer = self.get_serializer(data) except ValidationError: raise except Exception as exc: logger.exception("get admin folder list view") reason = "Server Error" raise
ValidationError(reason) from exc else: return Response(serializer.data) ================================================ FILE: codex/views/admin/permissions.py ================================================ """Codex drf permissions.""" from typing import override from rest_framework.permissions import BasePermission, IsAdminUser from codex.models import Timestamp class HasAPIKeyOrIsAdminUser(BasePermission): """Does the request have the current api key.""" @override def has_permission(self, request, view) -> bool: """Test the request api key against the database.""" data = request.GET if request.method == "GET" else request.POST api_key = data.get("apiKey") if not api_key: return IsAdminUser().has_permission(request, view) return Timestamp.objects.filter( key=Timestamp.Choices.API_KEY.value, version=api_key ).exists() ================================================ FILE: codex/views/admin/stats.py ================================================ """Admin Stats View.""" from types import MappingProxyType from typing import Any, ClassVar, override from drf_spectacular.utils import extend_schema from rest_framework.permissions import BasePermission from rest_framework.response import Response from rest_framework.serializers import empty from codex.librarian.telemeter.stats import CodexStats from codex.models.admin import Timestamp from codex.serializers.admin.stats import ( AdminStatsRequestSerializer, StatsSerializer, ) from codex.views.admin.auth import AdminGenericAPIView from codex.views.admin.permissions import HasAPIKeyOrIsAdminUser class AdminStatsView(AdminGenericAPIView): """Admin Stats View.""" permission_classes: ClassVar[list[type[BasePermission]]] = [HasAPIKeyOrIsAdminUser] serializer_class = StatsSerializer input_serializer_class = AdminStatsRequestSerializer def __init__(self, *args, **kwargs) -> None: """Initialize properties.""" super().__init__(*args, **kwargs) self._params: MappingProxyType[str, Any] | None = None @property def params(self) -> MappingProxyType[str, Any]: """Parse the input params.""" if self._params is None: data = self.request.GET input_serializer = self.input_serializer_class(data=data) input_serializer.is_valid(raise_exception=True) self._params = MappingProxyType( { key: value for key, value in input_serializer.validated_data.items() if input_serializer.validated_data and not isinstance(input_serializer.validated_data, empty) } ) return self._params def _add_api_key(self, obj) -> None: """Add the api key to the config object if specified.""" request_counts = self.params.get("config", {}) if request_counts and ("apikey" not in request_counts): return api_key = Timestamp.objects.get(key=Timestamp.Choices.API_KEY.value).version if "config" not in obj: obj["config"] = {} obj["config"]["api_key"] = api_key @override def get_object(self) -> dict: """Get the stats object with an api key.""" getter = CodexStats(self.params) obj = getter.get() self._add_api_key(obj) return obj @extend_schema(parameters=[input_serializer_class]) def get(self, *_args, **_kwargs) -> Response: """Get the stats object and serialize it.""" obj = self.get_object() serializer = self.get_serializer(obj) return Response(serializer.data) ================================================ FILE: codex/views/admin/tasks.py ================================================ """Librarian Status View.""" from types import MappingProxyType from typing import TYPE_CHECKING from django.db.models.query_utils import Q from drf_spectacular.utils import extend_schema from loguru import logger from
rest_framework.response import Response from codex.choices.notifications import Notifications from codex.librarian.bookmark.tasks import ( ClearLibrarianStatusTask, CodexLatestVersionTask, ) from codex.librarian.covers.tasks import ( CoverCreateAllTask, CoverRemoveAllTask, CoverRemoveOrphansTask, ) from codex.librarian.fs.poller.tasks import FSPollLibrariesTask from codex.librarian.fs.watcher.tasks import FSWatcherRestartTask from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import ( ADMIN_FLAGS_CHANGED_TASK, COVERS_CHANGED_TASK, FAILED_IMPORTS_CHANGED_TASK, GROUPS_CHANGED_TASK, LIBRARIAN_STATUS_TASK, LIBRARY_CHANGED_TASK, USERS_CHANGED_TASK, NotifierTask, ) from codex.librarian.restarter.tasks import CodexRestartTask, CodexShutdownTask from codex.librarian.scribe.janitor.tasks import ( JanitorAdoptOrphanFoldersTask, JanitorBackupTask, JanitorCleanCoversTask, JanitorCleanFKsTask, JanitorCleanupBookmarksTask, JanitorCleanupSessionsTask, JanitorCodexUpdateTask, JanitorForeignKeyCheckTask, JanitorFTSIntegrityCheckTask, JanitorFTSRebuildTask, JanitorImportForceAllFailedTask, JanitorIntegrityCheckTask, JanitorNightlyTask, JanitorVacuumTask, ) from codex.librarian.scribe.search.tasks import ( SearchIndexCleanStaleTask, SearchIndexClearTask, SearchIndexOptimizeTask, SearchIndexSyncTask, ) from codex.librarian.scribe.tasks import ( CleanupAbortTask, ImportAbortTask, SearchIndexSyncAbortTask, UpdateGroupsTask, ) from codex.librarian.tasks import LibrarianTask from codex.models import LibrarianStatus from codex.serializers.admin.tasks import AdminLibrarianTaskSerializer from codex.serializers.mixins import OKSerializer from codex.serializers.models.admin import LibrarianStatusSerializer from codex.views.admin.auth import AdminAPIView, AdminReadOnlyModelViewSet from codex.views.const import EPOCH_START if TYPE_CHECKING: from collections.abc import Mapping _TASK_MAP = MappingProxyType( { "purge_comic_covers": CoverRemoveAllTask(), "create_all_comic_covers": CoverCreateAllTask(), "search_index_update": SearchIndexSyncTask(rebuild=False), "search_index_rebuild": SearchIndexSyncTask(rebuild=True), "search_index_remove_stale": SearchIndexCleanStaleTask(), "import_abort": ImportAbortTask(), "search_index_abort": SearchIndexSyncAbortTask(), "cleanup_abort": CleanupAbortTask(), "search_index_optimize": SearchIndexOptimizeTask(), "search_index_clear": SearchIndexClearTask(), "db_vacuum": JanitorVacuumTask(), "db_backup": JanitorBackupTask(), "db_foreign_key_check": JanitorForeignKeyCheckTask(), "db_integrity_check": JanitorIntegrityCheckTask(), "db_fts_integrity_check": JanitorFTSIntegrityCheckTask(), "db_fts_rebuild": JanitorFTSRebuildTask(), "watcher_restart": FSWatcherRestartTask(), "codex_latest_version": CodexLatestVersionTask(force=True), "codex_update": JanitorCodexUpdateTask(force=False), "codex_shutdown": CodexShutdownTask(), "codex_restart": CodexRestartTask(), "notify_admin_flags_changed": ADMIN_FLAGS_CHANGED_TASK, "notify_covers_changed": COVERS_CHANGED_TASK, "notify_failed_imports_changed": FAILED_IMPORTS_CHANGED_TASK, "notify_groups_changed": GROUPS_CHANGED_TASK, "notify_library_changed": LIBRARY_CHANGED_TASK, "notify_librarian_status": LIBRARIAN_STATUS_TASK, "notify_users_changed": USERS_CHANGED_TASK, "cleanup_fks": JanitorCleanFKsTask(), "cleanup_db_custom_covers": JanitorCleanCoversTask(), "cleanup_sessions": JanitorCleanupSessionsTask(), "cleanup_bookmarks": JanitorCleanupBookmarksTask(), "cleanup_covers": CoverRemoveOrphansTask(), 
"librarian_clear_status": ClearLibrarianStatusTask(), "force_update_all_failed_imports": JanitorImportForceAllFailedTask(), "poll": FSPollLibrariesTask(frozenset(), force=False), "poll_force": FSPollLibrariesTask(frozenset(), force=True), "janitor_nightly": JanitorNightlyTask(), "force_update_groups": UpdateGroupsTask(start_time=EPOCH_START), "adopt_folders": JanitorAdoptOrphanFoldersTask(), } ) class AdminLibrarianStatusActiveViewSet(AdminReadOnlyModelViewSet): """Librarian Task Statuses (active/preactive only).""" queryset = LibrarianStatus.objects.filter( Q(preactive__isnull=False) | Q(active__isnull=False) ).order_by("preactive", "active", "pk") serializer_class = LibrarianStatusSerializer class AdminLibrarianStatusAllViewSet(AdminReadOnlyModelViewSet): """All Librarian Task Statuses including inactive (for Jobs tab).""" queryset = LibrarianStatus.objects.all().order_by("pk") serializer_class = LibrarianStatusSerializer class AdminLibrarianTaskView(AdminAPIView): """Queue Librarian Jobs.""" input_serializer_class = AdminLibrarianTaskSerializer serializer_class = OKSerializer def _get_task(self, name, pk) -> LibrarianTask | None: """Stuff library ids into tasks.""" if name == "notify_bookmark_changed": uid = self.request.user.pk group = f"user_{uid}" task = NotifierTask(Notifications.BOOKMARK.value, group) else: task = _TASK_MAP.get(name) if pk and isinstance(task, FSPollLibrariesTask): task.library_ids = frozenset({pk}) return task @extend_schema(request=input_serializer_class) def post(self, *_args, **_kwargs) -> Response: """Download a comic archive.""" # DRF does not populate POST correctly, only data data = self.request.data serializer = self.input_serializer_class(data=data) serializer.is_valid(raise_exception=True) validated_data: Mapping = serializer.validated_data task_name = validated_data.get("task") pk = validated_data.get("library_id") task = self._get_task(task_name, pk) if task: LIBRARIAN_QUEUE.put(task) task_log = task_name or "Unknown" if pk is not None: task_log += f" {pk}" logger.debug(f"Admin task submitted {task_log}") else: reason = f"Unknown admin library task_name: {task_name}" logger.warning(reason) raise ValueError(reason) serializer = self.serializer_class() return Response(serializer.data) ================================================ FILE: codex/views/admin/user.py ================================================ """Admin User ViewSet.""" from typing import override from django.contrib.auth.models import User from django.contrib.auth.password_validation import validate_password from django.core.exceptions import ValidationError from rest_framework.authtoken.models import Token from rest_framework.response import Response from rest_framework.status import HTTP_202_ACCEPTED, HTTP_400_BAD_REQUEST from codex.choices.notifications import Notifications from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.notifier.tasks import ( ADMIN_USERS_CHANGED_TASK, USERS_CHANGED_TASK, NotifierTask, ) from codex.serializers.admin.users import UserChangePasswordSerializer, UserSerializer from codex.views.admin.auth import AdminGenericAPIView, AdminModelViewSet _BAD_CURRENT_USER_FALSE_KEYS = ("is_active", "is_staff", "is_superuser") class AdminUserViewSet(AdminModelViewSet): """User ViewSet.""" queryset = ( User.objects.prefetch_related("groups") .select_related("useractive") .defer("first_name", "last_name", "email") ) serializer_class = UserSerializer INPUT_METHODS = ("POST", "PUT") @staticmethod def _on_change(uid: int) -> None: if uid: group = 
f"user_{uid}" tasks = ( ADMIN_USERS_CHANGED_TASK, NotifierTask(Notifications.USERS.value, group), ) else: tasks = (USERS_CHANGED_TASK,) for task in tasks: LIBRARIAN_QUEUE.put(task) @override def get_serializer(self, *args, **kwargs): """Allow partial data for update methods.""" if self.request.method in self.INPUT_METHODS: kwargs["partial"] = True return super().get_serializer(*args, **kwargs) def _is_change_to_current_user(self) -> bool: instance = self.get_object() return instance == self.request.user @override def destroy(self, request, *args, **kwargs) -> Response: """Destroy with guard for logged in user.""" if self._is_change_to_current_user(): reason = "Cannot delete logged in user." raise ValidationError(reason) res = super().destroy(request, *args, **kwargs) self._on_change(0) return res @override def perform_update(self, serializer) -> None: """Add hook after update.""" data = serializer.validated_data if not data.get("password"): data.pop("password", None) if self._is_change_to_current_user() and False in { data.get(key) for key in _BAD_CURRENT_USER_FALSE_KEYS }: reason = "Cannot deactivate logged in user." raise ValidationError(reason) uid = self.kwargs.get("pk", 0) super().perform_update(serializer) self._on_change(uid) @override def perform_create(self, serializer) -> None: """Create user.""" validated_data = serializer.validated_data password = validated_data["password"] validate_password(password) groups = validated_data.pop("groups") validated_data["email"] = "" user = User.objects.create_user(**validated_data) if groups: user.groups.set(groups) user.save() Token.objects.create(user=user) class AdminUserChangePasswordView(AdminGenericAPIView): """Special View to hash user password.""" serializer_class = UserChangePasswordSerializer def put(self, request, *args, **kwargs) -> Response: """Validate and set the user password.""" try: serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) pk = self.kwargs["pk"] user = User.objects.get(pk=pk) password = serializer.validated_data["password"] validate_password(password, user=user) user.set_password(password) user.save() status = HTTP_202_ACCEPTED detail = "Successfully changed password" except ValidationError as exc: status = HTTP_400_BAD_REQUEST detail = exc.error_list return Response( status=status, data={"detail": detail}, ) ================================================ FILE: codex/views/auth.py ================================================ """Views authorization bases.""" from collections.abc import Sequence from typing import override from django.contrib.auth.models import AnonymousUser from django.db.models.query_utils import Q from loguru import logger from rest_framework.authtoken.models import Token from rest_framework.authtoken.serializers import AuthTokenSerializer from rest_framework.exceptions import NotAuthenticated from rest_framework.generics import GenericAPIView from rest_framework.permissions import BasePermission, IsAuthenticated from rest_framework.response import Response from rest_framework.views import APIView from codex.choices.admin import AdminFlagChoices from codex.models import AdminFlag, Comic, Folder, StoryArc class IsAuthenticatedOrEnabledNonUsers(IsAuthenticated): """Custom DRF Authentication class.""" code = 401 @override def has_permission(self, request, view) -> bool: """Return True if ENABLE_NON_USERS is true or user authenticated.""" enu_flag = AdminFlag.objects.only("on").get( key=AdminFlagChoices.NON_USERS.value ) if enu_flag.on: return True 
return super().has_permission(request, view) class AuthMixin: """General Auth Policy.""" permission_classes: Sequence[type[BasePermission]] = ( IsAuthenticatedOrEnabledNonUsers, ) class AuthAPIView(AuthMixin, APIView): # pyright: ignore[reportIncompatibleVariableOverride] """Auth Policy APIView.""" class AuthGenericAPIView(AuthMixin, GenericAPIView): # pyright: ignore[reportIncompatibleVariableOverride] """Auth Policy GenericAPIView.""" class GroupACLMixin: """Filter group mixin for views and threads.""" def init_group_acl(self) -> None: """Initialize properties.""" self._is_admin: bool | None = None # pyright: ignore[reportUninitializedInstanceVariable] @property def is_admin(self) -> bool: """Is the current user an admin.""" if self._is_admin is None: user = self.request.user # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] self._is_admin = bool(user and getattr(user, "is_staff", False)) return self._is_admin @staticmethod def get_rel_prefix(model) -> str: """Return the relation prefix for most fields.""" prefix = "" if model is Comic: return prefix if model is StoryArc: prefix += "storyarcnumber__" prefix += "comic__" return prefix @classmethod def get_group_acl_filter(cls, model, user) -> Q: """Generate the group acl filter for comics.""" # The rel prefix groups_rel = cls.get_rel_prefix(model) if model is not Folder else "" groups_rel += "library__groups" # Libraries in no groups are visible to everyone ungrouped_filter = {f"{groups_rel}__isnull": True} q = Q(**ungrouped_filter) if not user or isinstance(user, AnonymousUser): return q # If logged in, see which libraries are now visible. user_filter = {f"{groups_rel}__user": user} exclude_rel = f"{groups_rel}__groupauth__exclude" # Include groups are visible to users in the group include_filter = {exclude_rel: False} include_filter.update(user_filter) include_q = Q(**include_filter) # Exclude groups are visible to users NOT in the group exclude_filter = {exclude_rel: True} exclude_filter.update(user_filter) exclude_q = Q(**exclude_filter) q |= include_q & ~exclude_q return q class AuthFilterGenericAPIView(AuthGenericAPIView, GroupACLMixin): """Auth Enabled GenericAPIView.""" def __init__(self, *args, **kwargs) -> None: """Init ACL properties.""" super().__init__(*args, **kwargs) self.init_group_acl() class AuthFilterAPIView(AuthAPIView, GroupACLMixin): """Auth Enabled APIView.""" def __init__(self, *args, **kwargs) -> None: """Init ACL properties.""" super().__init__(*args, **kwargs) self.init_group_acl() class AuthToken(AuthGenericAPIView): """Auth Token creation and getting.""" serializer_class = AuthTokenSerializer def get(self, *args, **kwargs) -> Response: """Get auth token.""" user = self.request.user if not user: reason = "Not an authenticated user." raise NotAuthenticated(detail=reason) token, created = Token.objects.get_or_create(user=user) if created: logger.info(f"Auth Token created for user {user}") data = {"token": token.key} return Response(data) def put(self, *args, **kwargs) -> Response: """Reset auth token for user.""" user = self.request.user if not user: reason = "Not an authenticated user."
raise NotAuthenticated(detail=reason) Token.objects.filter(user=user).delete() token, _ = Token.objects.get_or_create(user=user) logger.info(f"Auth Token updated for user {user}") data = {"token": token.key} return Response(data) ================================================ FILE: codex/views/bookmark.py ================================================ """Views for reading comic books.""" from abc import ABC from typing import TYPE_CHECKING from django.db.models.query_utils import Q from loguru import logger from rest_framework.response import Response from codex.librarian.bookmark.tasks import BookmarkUpdateTask from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.views.auth import AuthAPIView, GroupACLMixin if TYPE_CHECKING: from rest_framework.request import Request from codex.models import BrowserGroupModel class BookmarkFilterMixin(GroupACLMixin, ABC): """Bookmark filter methods.""" def init_bookmark_filter(self) -> None: """Initialize the bm_annotation_data.""" if TYPE_CHECKING: self.request: Request self._bm_rels: dict[BrowserGroupModel, str] = {} self._bm_filters: dict[BrowserGroupModel, Q] = {} def get_bm_rel(self, model): """Create bookmark relation.""" if model not in self._bm_rels: rel_prefix = self.get_rel_prefix(model) self._bm_rels[model] = rel_prefix + "bookmark" return self._bm_rels[model] def get_my_bookmark_filter(self, bm_rel) -> Q: """Get a filter for my session or user-defined bookmarks.""" if self.request.user and self.request.user.is_authenticated: key = f"{bm_rel}__user" value = self.request.user else: key = f"{bm_rel}__session" value = self.request.session.session_key my_bookmarks_kwargs = {key: value} return Q(**my_bookmarks_kwargs) class BookmarkAuthMixin: """Base class for Bookmark Views.""" def get_bookmark_auth_filter(self) -> dict[str, int | str | None]: """Filter only the user's bookmarks.""" if TYPE_CHECKING: self.request: Request # pyright: ignore[reportUninitializedInstanceVariable] if self.request.user.is_authenticated: key = "user_id" value = self.request.user.pk else: if not self.request.session or not self.request.session.session_key: logger.debug("no session, make one") self.request.session.save() key = "session_id" value = self.request.session.session_key return {key: value} class BookmarkPageMixin(BookmarkAuthMixin): """Update the bookmark if the bookmark param was passed.""" def update_bookmark(self) -> None: """Update the bookmark if the bookmark param was passed.""" if TYPE_CHECKING: self.kwargs: dict # pyright: ignore[reportUninitializedInstanceVariable] auth_filter = self.get_bookmark_auth_filter() comic_pks = [] if comic_pk := self.kwargs.get("pk"): comic_pks.append(comic_pk) comic_pks = tuple(comic_pks) page = self.kwargs.get("page") updates = {"page": page} task = BookmarkUpdateTask(auth_filter, comic_pks, updates) LIBRARIAN_QUEUE.put(task) class BookmarkPageView(BookmarkPageMixin, AuthAPIView): """Update the bookmark for a comic page.""" def put(self, *_args, **_kwargs) -> Response: """Update the bookmark.""" self.update_bookmark() return Response() ================================================ FILE: codex/views/browser/__init__.py ================================================ """Browser views.""" ================================================ FILE: codex/views/browser/annotate/__init__.py ================================================ """Browser annotation methods.""" ================================================ FILE: codex/views/browser/annotate/bookmark.py


================================================
FILE: codex/views/browser/__init__.py
================================================
"""Browser views."""



================================================
FILE: codex/views/browser/annotate/__init__.py
================================================
"""Browser annotation methods."""



================================================
FILE: codex/views/browser/annotate/bookmark.py
================================================
"""Base view for metadata annotations."""

from django.db.models import (
    Case,
    F,
    Sum,
    Value,
    When,
)
from django.db.models.fields import BooleanField, PositiveSmallIntegerField
from django.db.models.functions import Least
from django.db.models.functions.comparison import Coalesce, Greatest

from codex.models import (
    Comic,
)
from codex.models.functions import JsonGroupArray
from codex.views.browser.annotate.order import BrowserAnnotateOrderView


class BrowserAnnotateBookmarkView(BrowserAnnotateOrderView):
    """Base class for views that need special metadata annotations."""

    def _get_group_bookmark_page_annotation(
        self, qs, bm_rel, bm_filter, page_rel, finished_rel
    ) -> Sum:
        """Get bookmark page subquery."""
        finished_filter = {finished_rel: True}
        prefix = "" if qs.model is Comic else self.rel_prefix
        page_count = prefix + "page_count"
        # Can't use a filtered relation for page & finished because of this
        # page_count case.
        bookmark_page_case = Case(
            When(**{bm_rel: None}, then=0),
            When(**finished_filter, then=page_count),
            default=page_rel,
            output_field=PositiveSmallIntegerField(),
        )
        return Sum(
            bookmark_page_case,
            default=0,
            filter=bm_filter,
            output_field=PositiveSmallIntegerField(),
            distinct=True,
        )

    @classmethod
    def _get_group_bookmark_finished_annotation(
        cls, qs, bm_filter, finished_rel
    ) -> tuple:
        """Get finished_count subquery."""
        finished_count = Sum(
            finished_rel,
            default=0,
            filter=bm_filter,
            output_field=PositiveSmallIntegerField(),
            # distinct breaks this sum and only returns one. idk why.
        )
        qs = qs.alias(finished_count=finished_count)
        finished_aggregate = Case(
            When(finished_count=F("child_count"), then=True),
            When(finished_count=0, then=False),
            default=None,
            output_field=BooleanField(),
        )
        return qs, finished_aggregate

    def annotate_bookmarks(self, qs):
        """Hoist up bookmark annotations."""
        bm_rel = self.get_bm_rel(qs.model)
        bm_filter = self.get_my_bookmark_filter(bm_rel)
        page_rel = f"{bm_rel}__page"
        finished_rel = f"{bm_rel}__finished"

        if qs.model is Comic:
            bookmark_page = Sum(page_rel, filter=bm_filter, default=0)
            finished_aggregate = Sum(finished_rel, filter=bm_filter, default=False)
        else:
            bookmark_page = self._get_group_bookmark_page_annotation(
                qs, bm_rel, bm_filter, page_rel, finished_rel
            )
            qs, finished_aggregate = self._get_group_bookmark_finished_annotation(
                qs, bm_filter, finished_rel
            )

        if (
            (self.TARGET == "browser" and qs.model is Comic)
            or self.is_opds_acquisition
            or self.TARGET == "metadata"
        ):
            qs = qs.annotate(page=bookmark_page)
        else:
            qs = qs.alias(page=bookmark_page)

        qs = qs.annotate(finished=finished_aggregate)

        if not self.bmua_is_max:
            mbmua = self.get_max_bookmark_updated_at_aggregate(qs.model, JsonGroupArray)
            qs = qs.annotate(bookmark_updated_ats=mbmua)

        return qs

    def annotate_progress(self, qs):
        """Compute progress for each member of a qs."""
        # Requires bookmark and annotation hoisted from bookmarks.
        # Requires page_count native to comic or aggregated
        # Page counts can be null with metadata turned off.
        # Least guard is for rare instances when bookmarks are set to
        # invalid high values
        progress = Least(
            Coalesce(F("page"), 0)
            * 100.0
            / Greatest(Coalesce(F("page_count"), 1) - 1, 1),
            Value(100.0),
        )
        return qs.annotate(progress=progress)
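
# Worked example of the progress expression above (editor sketch, values are
# hypothetical): with page=12 and page_count=25,
#
#     progress = min(12 * 100.0 / max(25 - 1, 1), 100.0)  # = 50.0
#
# Coalesce() guards null bookmarks and null page counts, Greatest() avoids a
# divide-by-zero for one-page comics, and Least() caps bookmarks that were
# set past the last page at 100%.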
prefix = "" if qs.model is Comic else self.rel_prefix updated_at_field = prefix + "updated_at" return qs.annotate( updated_ats=JsonGroupArray( updated_at_field, distinct=True, order_by=updated_at_field ) ) ================================================ FILE: codex/views/browser/annotate/order.py ================================================ """Base view for metadata annotations.""" from os import sep from types import MappingProxyType from django.db.models import ( F, FilteredRelation, Q, QuerySet, Value, ) from django.db.models.aggregates import Avg, Count, Max, Min, Sum from django.db.models.fields import CharField from django.db.models.functions import Reverse, Right, StrIndex from codex.models import ( Comic, Folder, StoryArc, ) from codex.models.functions import ComicFTSRank, JsonGroupArray from codex.views.browser.order_by import ( BrowserOrderByView, ) from codex.views.const import ( NONE_INTEGERFIELD, STORY_ARC_GROUP, ) from codex.views.mixins import SharedAnnotationsMixin _ORDER_AGGREGATE_FUNCS = MappingProxyType( # These are annotated to order_value because they're simple relations { "age_rating": Avg, "child_count": Min, "created_at": Min, "critical_rating": Avg, "date": Min, "page_count": Sum, "size": Sum, "updated_at": Min, } ) _ANNOTATED_ORDER_FIELDS = frozenset( # These are annotated with their own functions { "bookmark_updated_at", "child_count", "filename", "search_score", "sort_name", "story_arc_number", } ) class BrowserAnnotateOrderView(BrowserOrderByView, SharedAnnotationsMixin): """Base class for views that need special metadata annotations.""" CARD_TARGETS = frozenset({"browser", "metadata"}) _OPDS_TARGETS = frozenset({"opds1", "opds2"}) _PAGE_COUNT_TARGETS = frozenset(CARD_TARGETS | _OPDS_TARGETS) _COVER_AND_CARD_TARGETS = frozenset(CARD_TARGETS | {"cover"}) def __init__(self, *args, **kwargs) -> None: """Set params for the type checker.""" super().__init__(*args, **kwargs) self._order_agg_func: type[Min | Max] | None = None self._is_opds_acquisition: bool | None = None self._opds_acquisition_groups: frozenset[str] | None = None self.bmua_is_max = False self._child_count_annotated = False @property def opds_acquisition_groups(self): """Memoize the opds acquisition groups.""" if self._opds_acquisition_groups is None: groups = {"a", "f", "c"} groups |= {*self.valid_nav_groups[-2:]} self._opds_acquisition_groups = frozenset(groups) return self._opds_acquisition_groups @property def is_opds_acquisition(self) -> bool: """Memoize if we're in an opds acquisition view.""" if self._is_opds_acquisition is None: is_opds_acquisition = self.TARGET in self._OPDS_TARGETS if is_opds_acquisition: group = self.kwargs.get("group") is_opds_acquisition &= group in self.opds_acquisition_groups if is_opds_acquisition and group == "a": pks = self.kwargs["pks"] is_opds_acquisition &= bool(pks and 0 not in pks) self._is_opds_acquisition = is_opds_acquisition return self._is_opds_acquisition @property def order_agg_func(self): """Get the order aggregate function.""" if self._order_agg_func is None: order_reverse = self.params.get("order_reverse") self._order_agg_func = Max if order_reverse else Min return self._order_agg_func def _alias_sort_names(self, qs): """Annotate sort_name.""" if self.order_key != "sort_name" and not ( qs.model is StoryArc and self.order_key == "story_arc_number" ): return qs group = self.kwargs.get("group") pks = self.kwargs.get("pks") show = MappingProxyType(self.params["show"]) sort_name_annotations = self.get_sort_name_annotations( qs.model, group, pks, show ) 
        if sort_name_annotations:
            qs = qs.alias(**sort_name_annotations)
            if qs.model is Comic:
                self._comic_sort_names = tuple(sort_name_annotations.keys())
        return qs

    def get_filename_func(self, model) -> Right:
        """Get the filename creation function."""
        prefix = "" if model == Comic else self.rel_prefix
        path_rel = prefix + "path"
        return Right(
            path_rel,
            StrIndex(  # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type]
                Reverse(F(path_rel)),
                Value(sep),
            )
            - 1,
            output_field=CharField(),
        )

    def _alias_filename(self, qs):
        """Calculate filename from path in the db."""
        if self.order_key != "filename":
            return qs
        if qs.model is Folder:
            filename = F("name")
        else:
            filename_func = self.get_filename_func(qs.model)
            filename = self.order_agg_func(filename_func)
        return qs.alias(filename=filename)

    def _alias_story_arc_number(self, qs):
        if self.order_key != "story_arc_number":
            return qs

        # Get story_arc__pk
        group = self.kwargs["group"]
        pks = self.kwargs["pks"]
        if group == STORY_ARC_GROUP and pks:
            story_arc_pks = pks
        else:
            story_arc_pks = self.params.get("filters", {}).get("story_arcs", ())

        # If we have one annotate it.
        if story_arc_pks:
            rel = self.get_rel_prefix(qs.model) + "story_arc_numbers"
            condition_rel = "pk" if qs.model is StoryArc else rel + "__story_arc"
            condition = Q(**{f"{condition_rel}__in": story_arc_pks})
            qs = qs.alias(
                selected_story_arc_number=FilteredRelation(rel, condition=condition),
            )
            story_arc_number = self.order_agg_func("selected_story_arc_number__number")
        else:
            story_arc_number = NONE_INTEGERFIELD
        return qs.alias(story_arc_number=story_arc_number)

    def _annotate_page_count(self, qs):
        """Hoist up total page_count of children."""
        # Used for sorting and progress
        if qs.model is Comic or (
            self.order_key != "page_count"
            and self.TARGET not in self._PAGE_COUNT_TARGETS
        ):
            return qs
        rel = self.rel_prefix + "page_count"
        page_count_sum = Sum(rel, distinct=True)
        if self.TARGET == "browser":
            qs = qs.alias(page_count=page_count_sum)
        else:
            qs = qs.annotate(page_count=page_count_sum)
        return qs

    def _annotate_bookmark_updated_at(self, qs) -> QuerySet:
        if self.is_opds_acquisition or self.order_key == "bookmark_updated_at":
            bmua_agg = self.get_max_bookmark_updated_at_aggregate(
                qs.model, agg_func=self.order_agg_func
            )
            # This is used by annotate.bookmark to avoid a
            # similar query.
            self.bmua_is_max = self.order_agg_func is Max
            qs = qs.annotate(bookmark_updated_at=bmua_agg)
        # This is used by the serializer to compute mtime
        return qs.annotate(bmua_is_max=Value(self.bmua_is_max))

    def _annotate_search_scores(self, qs):
        """Annotate Search Scores."""
        if (
            self.TARGET not in self._COVER_AND_CARD_TARGETS
            or self.order_key != "search_score"
        ):
            return qs
        # Rank is always the max of the relations, cannot aggregate?
        # group by here fixes duplicates with story_arc, probably because
        # it's a long relation
        return qs.annotate(search_score=ComicFTSRank()).group_by("id")

    def annotate_child_count(self, qs):
        """Annotate child count."""
        if qs.model is Comic or self._child_count_annotated:
            return qs
        rel = self.rel_prefix + "pk"
        count_func = Count(rel, distinct=True)
        ann = {"child_count": count_func}
        qs = qs.alias(**ann) if self.TARGET == "opds2" else qs.annotate(**ann)
        self._child_count_annotated = True
        return qs

    def _annotate_order_child_count(self, qs):
        """Annotate child count for order."""
        if self.order_key != "child_count":
            return qs
        return self.annotate_child_count(qs)

    def annotate_order_value(self, qs):
        """Annotate a main key for sorting and browser card display."""
        # Determine order func
        if self.TARGET == "metadata":
            return qs
        if qs.model is Folder and self.order_key == "filename":
            order_value = F("name")
        elif qs.model is Comic:
            order_key = (
                "sort_name" if self.order_key == "child_count" else self.order_key
            )
            order_value = F(order_key)
        elif self.order_key in _ANNOTATED_ORDER_FIELDS:
            # These are annotated in browser_annotations
            order_value = F(self.order_key)
        else:
            agg_func = _ORDER_AGGREGATE_FUNCS[self.order_key]
            agg_func = self.order_agg_func if agg_func == Min else agg_func
            field = self.rel_prefix + self.order_key
            order_value = agg_func(field)

        if self.TARGET == "browser":
            qs = qs.annotate(order_value=order_value)
        else:
            qs = qs.alias(order_value=order_value)
        return qs

    def annotate_order_aggregates(self, qs: QuerySet):
        """Annotate common aggregates between browser and metadata."""
        qs = qs.annotate(ids=JsonGroupArray("id", distinct=True, order_by="id"))
        qs = self._annotate_search_scores(qs)
        qs = self._alias_sort_names(qs)
        qs = self._alias_filename(qs)
        qs = self._alias_story_arc_number(qs)
        qs = self._annotate_page_count(qs)
        qs = self._annotate_bookmark_updated_at(qs)
        qs = self._annotate_order_child_count(qs)
        if qs.model is not Comic:
            # comic orders on indexed fields when it can
            qs = self.annotate_order_value(qs)
        return qs
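
# Editor sketch of the order_value dispatch above, with assumed inputs: for a
# Series queryset ordered by "size", annotate_order_value resolves roughly to
#
#     order_value = Sum("comic__size")      # from _ORDER_AGGREGATE_FUNCS
#     qs = qs.annotate(order_value=order_value)
#
# while a Comic queryset ordered the same way uses the indexed column directly:
#
#     order_value = F("size")
#
# Min/Max aggregates flip with order_reverse; Sum and Avg do not need to.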


================================================
FILE: codex/views/browser/bookmark.py
================================================
"""Bookmark view."""

from types import MappingProxyType
from typing import override

from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.librarian.bookmark.update import BookmarkUpdateMixin
from codex.models.comic import Comic
from codex.serializers.models.bookmark import (
    BookmarkFinishedSerializer,
    BookmarkSerializer,
)
from codex.views.bookmark import BookmarkAuthMixin
from codex.views.browser.filters.filter import BrowserFilterView


class BookmarkView(BookmarkUpdateMixin, BookmarkAuthMixin, BrowserFilterView):
    """User Bookmark View."""

    serializer_class: type[BaseSerializer] | None = BookmarkSerializer

    TARGET: str = "bookmark"

    def __init__(self, *args, **kwargs) -> None:
        """Init acl properties."""
        super().__init__(*args, **kwargs)
        self.init_group_acl()

    def _parse_params(self):
        """Validate and translate the submitted data."""
        group = self.kwargs.get("group")
        # If the target is recursive, strip everything but finished state data.
        serializer_class = None if group == "c" else BookmarkFinishedSerializer
        data = self.request.data
        if serializer_class:
            serializer = serializer_class(data=data, partial=True)
        else:
            serializer = self.get_serializer(data=data, partial=True)
        serializer.is_valid(raise_exception=True)
        return serializer.validated_data

    def _get_comic_query(self):
        """Get comic pks for group."""
        group = self.kwargs.get("group")
        pks = self.kwargs.get("pks")
        return self.get_filtered_queryset(Comic, group=group, pks=pks).only("pk")

    @extend_schema(request=serializer_class, responses=None)
    def patch(self, *_args, **_kwargs) -> Response:
        """Update bookmarks recursively."""
        updates = self._parse_params()
        auth_filter = self.get_bookmark_auth_filter()
        comic_qs = self._get_comic_query()
        self.update_bookmarks(auth_filter, comic_qs, updates)
        return Response()

    @property
    @override
    def params(self):
        """Retrieve params but don't save them."""
        if self._params is None:
            params = self.load_params_from_settings()
            self._params = MappingProxyType(params)
        return self._params
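
# Editor sketch (hypothetical request; the route shape is assumed): marking a
# whole series finished via the recursive PATCH above. For any group other
# than "c", BookmarkFinishedSerializer reduces the payload to finished state:
#
#     PATCH .../bookmark/s/<series_pk>   {"finished": true}
#
# which resolves the group to its filtered Comic queryset and calls
# update_bookmarks(auth_filter, comic_qs, {"finished": True}).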


================================================
FILE: codex/views/browser/breadcrumbs.py
================================================
"""Browser breadcrumbs calculations."""

from types import MappingProxyType
from typing import TYPE_CHECKING

from django.db.models import QuerySet

from codex.models import (
    BrowserGroupModel,
    Comic,
    Imprint,
    Series,
    Volume,
)
from codex.models.groups import Publisher
from codex.views.browser.paginate import BrowserPaginateView
from codex.views.const import (
    FOLDER_GROUP,
    GROUP_MODEL_MAP,
    STORY_ARC_GROUP,
)
from codex.views.util import Route

if TYPE_CHECKING:
    from codex.models.groups import Folder

_GROUP_INSTANCE_SELECT_RELATED: MappingProxyType[
    type[BrowserGroupModel], tuple[str, ...]
] = MappingProxyType(
    {
        Comic: ("series", "volume"),
        Volume: ("series", "imprint", "publisher"),
        Series: ("imprint", "publisher"),
        Imprint: ("publisher",),
    }
)
# Map from group letter to the FK attribute chain for walking up the hierarchy.
# Each entry is (parent_group_letter, attribute_on_instance).
_GROUP_PARENT_CHAIN: MappingProxyType[str, tuple[tuple[str, str], ...]] = (
    MappingProxyType(
        {
            "v": (("s", "series"), ("i", "imprint"), ("p", "publisher")),
            "s": (("i", "imprint"), ("p", "publisher")),
            "i": (("p", "publisher"),),
            "p": (),
        }
    )
)


class BrowserBreadcrumbsView(BrowserPaginateView):
    """Browser breadcrumbs calculations."""

    def __init__(self, *args, **kwargs) -> None:
        """Set params for the type checker."""
        super().__init__(*args, **kwargs)
        # Use 0 to indicate unmemoized because None is a valid value
        self._group_instance: BrowserGroupModel | None | int = 0

    def _get_group_query(self, model):
        """Get the group query for the group instance."""
        pks = self.kwargs.get("pks")
        qs = model.objects.filter(pk__in=pks)
        if select_related := _GROUP_INSTANCE_SELECT_RELATED.get(model):
            qs = qs.select_related(*select_related)
        order_by = "name" if model is Volume else "sort_name"
        return qs.order_by(order_by)

    def _handle_group_query_missing_model(self, model) -> QuerySet:
        """Handle a missing model for the group instance."""
        group = self.kwargs.get("group")
        pks = self.kwargs.get("pks")
        page = self.kwargs.get("page")
        if group == "r" and not pks and page == 1:
            group_query = model.objects.none()
        else:
            reason = f"{group}__in={pks} does not exist!"
            self.raise_redirect(
                reason,
                route_mask={"group": group},
            )
        return group_query  # pyright: ignore[reportPossiblyUnboundVariable]

    @property
    def group_instance(self) -> BrowserGroupModel | None:
        """Memoize group instance for getting group names & counts."""
        if self._group_instance == 0:
            group = self.kwargs.get("group")
            model = GROUP_MODEL_MAP[group]
            pks = self.kwargs.get("pks")
            if model and pks and 0 not in pks:
                try:
                    group_query = self._get_group_query(model)
                except model.DoesNotExist:
                    group_query = self._handle_group_query_missing_model(model)
            else:
                if not model:
                    model = Publisher
                group_query = model.objects.none()
            self._group_instance = group_query.first()
        return self._group_instance  # pyright: ignore[reportReturnType], # ty: ignore[invalid-return-type]

    def _build_group_breadcrumbs(self) -> tuple[Route, ...]:
        """Build breadcrumbs for browse group mode by walking FK parents."""
        gi = self.group_instance
        group = self.kwargs["group"]
        pks = self.kwargs["pks"]
        page = self.kwargs["page"]
        if not gi:
            return (Route("r", (), 1, ""),)

        # Start with current crumb
        crumbs: list[Route] = [Route(group, pks, page, gi.name)]

        # Walk up the parent chain via FKs
        vng = self.valid_nav_groups
        parent_chain = _GROUP_PARENT_CHAIN.get(group, ())
        for parent_group, attr in parent_chain:
            if parent_group not in vng:
                continue
            if parent := getattr(gi, attr, None):
                crumbs.append(Route(parent_group, (parent.pk,), 1, parent.name))
            else:
                crumbs.append(Route(parent_group, (), 1, ""))

        # Always add root
        crumbs.append(Route("r", (), 1, ""))
        crumbs.reverse()
        return tuple(crumbs)

    def _build_folder_breadcrumbs(self) -> tuple[Route, ...]:
        """Build breadcrumbs for folder mode by walking parent_folder FKs."""
        pks = self.kwargs["pks"]
        page = self.kwargs["page"]
        folder: Folder | None = self.group_instance  # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        name = folder.name if folder and pks else ""
        crumbs: list[Route] = [Route(FOLDER_GROUP, pks, page, name)]

        # Walk up the parent_folder chain
        while folder and folder.parent_folder:
            folder = folder.parent_folder
            crumbs.append(Route(FOLDER_GROUP, (folder.pk,), 1, folder.name))  # pyright: ignore[reportOptionalMemberAccess]

        # Add folder root if not already there
        if crumbs[-1].pks:
            crumbs.append(Route(FOLDER_GROUP, (), 1, ""))

        crumbs.reverse()
        return tuple(crumbs)

    def _build_story_arc_breadcrumbs(self) -> tuple[Route, ...]:
        """Build breadcrumbs for story arc mode."""
        pks = self.kwargs["pks"]
        page = self.kwargs["page"]
        gi = self.group_instance
        name = gi.name if gi else ""
        crumbs: list[Route] = [Route(STORY_ARC_GROUP, pks, page, name)]

        # Add story arc root if viewing a specific arc
        if pks and 0 not in pks:
            crumbs.insert(0, Route(STORY_ARC_GROUP, (), 1, ""))

        return tuple(crumbs)

    def get_breadcrumbs(self) -> tuple[Route, ...]:
        """Compute breadcrumbs by browser mode from FK hierarchy."""
        group = self.kwargs["group"]
        if group == FOLDER_GROUP:
            return self._build_folder_breadcrumbs()
        if group == STORY_ARC_GROUP:
            return self._build_story_arc_breadcrumbs()
        return self._build_group_breadcrumbs()
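
# Editor sketch of _build_group_breadcrumbs with assumed data: browsing Volume
# pk=9 ("1999") under Series "Example Series" (pk=5), with imprints and
# publishers absent from valid_nav_groups, yields after the reverse():
#
#     (Route("r", (), 1, ""),
#      Route("s", (5,), 1, "Example Series"),
#      Route("v", (9,), 2, "1999"))
#
# Parents missing from valid_nav_groups are skipped rather than emitted empty.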


================================================
FILE: codex/views/browser/browser.py
================================================
"""Views for browsing comic library."""

from math import ceil, floor, log10
from types import MappingProxyType
from typing import override

from django.db.models import Max
from django.db.utils import OperationalError
from drf_spectacular.utils import extend_schema
from loguru import logger
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.choices.admin import AdminFlagChoices
from codex.models import (
    Comic,
    Folder,
    Library,
)
from codex.serializers.browser.page import BrowserPageSerializer
from codex.settings import BROWSER_MAX_OBJ_PER_PAGE
from codex.views.browser.title import BrowserTitleView
from codex.views.const import (
    COMIC_GROUP,
    FOLDER_GROUP,
    STORY_ARC_GROUP,
)


class BrowserView(BrowserTitleView):
    """Browse comics with a variety of filters and sorts."""

    serializer_class: type[BaseSerializer] | None = BrowserPageSerializer

    ADMIN_FLAGS = (
        AdminFlagChoices.FOLDER_VIEW,
        AdminFlagChoices.IMPORT_METADATA,
    )
    TARGET: str = "browser"

    ########
    # Init #
    ########

    @property
    @override
    def model_group(self):
        """Get the group of the models to browse."""
        # the model group shown must be:
        # A valid nav group or 'c'
        # the child of the current nav group or 'c'
        if not self._model_group:
            group = self.kwargs["group"]
            if group == FOLDER_GROUP:
                self._model_group = group
            elif group == STORY_ARC_GROUP:
                pks = self.kwargs.get("pks")
                self._model_group = COMIC_GROUP if pks else group
            elif group == self.valid_nav_groups[-1] or group == COMIC_GROUP:
                # special case for lowest valid group
                self._model_group = COMIC_GROUP
            else:
                self._model_group = self.valid_nav_groups[
                    self.valid_nav_groups.index(group) + 1
                ]
        return self._model_group

    ################
    # MAIN QUERIES #
    ################

    def _get_limit(self):
        """Get the limit for the query."""
        # query_limit only is set by some opds views
        query_limit = self.params.get("limit", 0)
        search_limit = self.get_search_limit()
        if query_limit and search_limit:
            limit = min(query_limit, search_limit)
        else:
            limit = max(query_limit, search_limit)
        return limit

    def _get_common_queryset(self, model) -> tuple:
        """Create queryset common to group & books."""
        qs = self.get_filtered_queryset(model)
        limit = self._get_limit()

        try:
            count_qs = self.add_group_by(qs)
            if limit:
                count_qs = count_qs[:limit]
            # Get count after filters and before any annotations or orders
            # because it's faster. These counts are used by
            # is_page_in_bounds(), num_pages for the nav bar, and paginate()
            count = count_qs.count()
        except OperationalError as exc:
            self._handle_operational_error(exc)
            count = 0
            qs = model.objects.none()

        if count:
            qs = self.annotate_order_aggregates(qs)
            qs = self.add_order_by(qs)
            if limit:
                qs = qs[:limit]
        else:
            qs = qs.order_by("pk")

        return qs, count

    def _get_group_queryset(self) -> tuple:
        """Create group queryset."""
        if self.model is Comic:
            qs = self.model.objects.none().order_by("pk")
            count = 0
        else:
            qs, count = self._get_common_queryset(self.model)
            qs = self.add_group_by(qs)
        return qs, count

    def _get_book_queryset(self) -> tuple:
        """Create book queryset."""
        if self.model in (Comic, Folder):
            qs, count = self._get_common_queryset(Comic)
        else:
            qs = Comic.objects.none().order_by("pk")
            count = 0
        return qs, count

    @staticmethod
    def _get_zero_pad(book_qs) -> int:
        """Get the zero padding for the display."""
        issue_number_max = book_qs.only("issue_number").aggregate(Max("issue_number"))[
            "issue_number__max"
        ]
        zero_pad = 1
        if issue_number_max:
            zero_pad += floor(log10(issue_number_max))
        return zero_pad

    def _get_page_mtime(self):
        return self.get_group_mtime(self.model, page_mtime=True)

    def _debug_queries(self, group_count, book_count, group_qs, book_qs) -> None:
        """Log query details."""
        if group_count:
            logger.debug(group_qs.explain())
            logger.debug(group_qs.query)
        if book_count:
            logger.debug(book_qs.explain())
            logger.debug(book_qs.query)

    def get_book_qs(self) -> tuple:
        """Only get the book queryset."""
        book_qs, book_count = self._get_book_queryset()
        if book_count:
            # select_related volume would be appropriate but opds doesn't need it.
            book_qs = book_qs.select_related("series")
            zero_pad = self._get_zero_pad(book_qs)
            book_qs = self.annotate_card_aggregates(book_qs)
            book_qs = self.force_inner_joins(book_qs)
        else:
            zero_pad = 0
        return book_qs, book_count, zero_pad

    def _get_group_and_books(self) -> tuple:
        """Create the main queries with filters, annotation and pagination."""
        group_qs, group_count = self._get_group_queryset()
        book_qs, book_count = self._get_book_queryset()

        # Paginate
        num_pages = ceil((group_count + book_count) / BROWSER_MAX_OBJ_PER_PAGE)
        self.check_page_in_bounds(num_pages)
        group_qs, book_qs, page_group_count, page_book_count = self.paginate(
            group_qs, book_qs, group_count
        )

        # Annotate
        if page_group_count:
            group_qs = self.annotate_card_aggregates(group_qs)
            group_qs = self.force_inner_joins(group_qs)
        if page_book_count:
            zero_pad = self._get_zero_pad(book_qs)
            book_qs = self.annotate_card_aggregates(book_qs)
            book_qs = self.force_inner_joins(book_qs)
        else:
            zero_pad = 1
        # self._debug_queries(page_group_count, page_book_count, group_qs, book_qs)  # noqa: ERA001

        total_page_count = page_group_count + page_book_count
        mtime = self._get_page_mtime()

        return group_qs, book_qs, num_pages, total_page_count, zero_pad, mtime

    @override
    def get_object(self) -> MappingProxyType:
        """Validate settings and get the querysets."""
        group_qs, book_qs, num_pages, total_count, zero_pad, mtime = (
            self._get_group_and_books()
        )

        # get additional context
        breadcrumbs = self.get_breadcrumbs()
        title = self.get_browser_page_title()

        # needs to happen after pagination
        # runs obj list query twice :/
        libraries_exist = Library.objects.filter(covers_only=False).exists()

        # construct final data structure
        return MappingProxyType(
            {
                "breadcrumbs": breadcrumbs,
                "title": title,
                "model_group": self.model_group,
                "groups": group_qs,
                "books": book_qs,
                "zero_pad": zero_pad,
                "num_pages": num_pages,
                "total_count": total_count,
"admin_flags": self.admin_flags, "libraries_exist": libraries_exist, "mtime": mtime, "search_error": self.search_error, "fts": self.fts_mode, } ) @extend_schema(parameters=[BrowserTitleView.input_serializer_class]) def get(self, *_args, **_kwargs) -> Response: """Get browser settings.""" data = self.get_object() serializer = self.get_serializer(data) return Response(serializer.data) ================================================ FILE: codex/views/browser/choices.py ================================================ """View for marking comics read and unread.""" from itertools import chain from types import MappingProxyType from typing import Any, override from caseconverter import snakecase from django.db.models import QuerySet from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.choices.browser import DUMMY_NULL_NAME, VUETIFY_NULL_CODE from codex.models import ( Comic, CreditPerson, StoryArc, ) from codex.models.identifier import IdentifierSource from codex.models.named import Universe from codex.serializers.browser.choices import ( BrowserChoicesFilterSerializer, BrowserFilterChoicesSerializer, ) from codex.serializers.browser.settings import BrowserFilterChoicesInputSerializer from codex.views.browser.filters.filter import BrowserFilterView from codex.views.settings import ( CREDIT_PERSON_UI_FIELD, IDENTIFIER_TYPE_UI_FIELD, STORY_ARC_UI_FIELD, ) _FIELD_TO_REL_MODEL_MAP = MappingProxyType( { CREDIT_PERSON_UI_FIELD: ( "credits__person", CreditPerson, ), IDENTIFIER_TYPE_UI_FIELD: ( "identifiers__source", IdentifierSource, ), STORY_ARC_UI_FIELD: ( "story_arc_numbers__story_arc", StoryArc, ), } ) _BACK_REL_MAP = MappingProxyType( { CreditPerson: "credit__", StoryArc: "storyarcnumber__", IdentifierSource: "identifier__", } ) _NULL_NAMED_ROW = MappingProxyType({"pk": VUETIFY_NULL_CODE, "name": DUMMY_NULL_NAME}) _NULL_NAMED_ROW_ITERABLE = (_NULL_NAMED_ROW,) class BrowserChoicesViewBase(BrowserFilterView): """Get choices for filter dialog.""" input_serializer_class: type[BrowserFilterChoicesInputSerializer] = ( # pyright: ignore[reportIncompatibleVariableOverride] BrowserFilterChoicesInputSerializer ) TARGET: str = "choices" @staticmethod def get_field_choices_query(comic_qs, field_name): """Get distinct values for the field.""" return comic_qs.exclude(**{f"{field_name}__isnull": True}).distinct() def get_m2m_field_query(self, model, comic_qs: QuerySet): """Get distinct m2m value objects for the relation.""" back_rel = _BACK_REL_MAP.get(model, "") m2m_filter = {f"{back_rel}comic__in": comic_qs} return model.objects.filter(**m2m_filter).distinct() @staticmethod def does_m2m_null_exist(comic_qs, rel): """Get if comics exist in the filter without values exists for an m2m field.""" # Detect if there are null choices. Regretably with another query. 
        return comic_qs.filter(**{f"{rel}__isnull": True}).exists()

    def get_rel_and_model(self, field_name) -> tuple:
        """Return the relation and model for the field name."""
        rel_and_model = _FIELD_TO_REL_MODEL_MAP.get(field_name)
        if rel_and_model:
            rel, model = rel_and_model
        else:
            remote_field = getattr(
                Comic._meta.get_field(field_name), "remote_field", None
            )
            rel = field_name
            model = remote_field.model if remote_field else None
        return rel, model

    @override
    def get_object(self) -> QuerySet:
        """Get the comic subquery used for the choices."""
        return self.get_filtered_queryset(Comic)

    @extend_schema(parameters=[input_serializer_class])
    def get(self, *_args, **_kwargs) -> Response:
        """Return choices."""
        obj = self.get_object()
        serializer = self.get_serializer(obj)
        return Response(serializer.data)


class BrowserChoicesAvailableView(BrowserChoicesViewBase):
    """Get choices for filter dialog."""

    serializer_class: type[BaseSerializer] | None = BrowserFilterChoicesSerializer

    @classmethod
    def _is_field_choices_exists(cls, comic_qs, field_name) -> bool:
        """Create a pk:name object for fields without tables."""
        qs = cls.get_field_choices_query(comic_qs, field_name)
        return qs.exists()

    def _is_m2m_field_choices_exists(self, model, comic_qs, rel) -> bool:
        """Get choices with nulls where there are nulls."""
        qs = self.get_m2m_field_query(model, comic_qs)
        qs = qs[:2]
        count = qs.count()
        if count > 1:
            # There are choices
            return True
        if count == 1:
            # There are only choices if a null exists
            return self.does_m2m_null_exist(comic_qs, rel)
        # There is only one or no choices.
        return False

    def _is_filter_field_choices_exists(self, qs: QuerySet, field_name: str) -> bool:
        rel, m2m_model = self.get_rel_and_model(field_name)
        if m2m_model:
            exists = self._is_m2m_field_choices_exists(m2m_model, qs, rel)
        else:
            exists = self._is_field_choices_exists(qs, field_name)
        try:
            flag = exists
        except TypeError:
            flag = False
        return flag

    @override
    def get_object(self) -> dict[str, Any]:  # pyright: ignore[reportIncompatibleMethodOverride], # ty: ignore[invalid-method-override]
        """Get choice counts."""
        qs = super().get_object()
        filters = self.params.get("filters", {})
        data = {}
        serializer: BrowserFilterChoicesSerializer = self.serializer_class()  # pyright: ignore[reportOptionalCall, reportAssignmentType], # ty: ignore[call-non-callable, invalid-assignment]
        for field_name in serializer.get_fields():
            if field_name == "story_arcs" and qs.model is StoryArc:
                # don't allow filtering on story arc in story arc view.
                continue
            if bool(filters.get(field_name)):
                flag = True
            else:
                flag = self._is_filter_field_choices_exists(qs, field_name)
            data[field_name] = flag
        return data


class BrowserChoicesView(BrowserChoicesViewBase):
    """Get choices for filter dialog."""

    serializer_class: type[BaseSerializer] | None = BrowserChoicesFilterSerializer

    def _get_m2m_field_choices(self, model, comic_qs, rel):
        """Get choices with nulls where there are nulls."""
        iterables = []

        # Choices
        qs = self.get_m2m_field_query(model, comic_qs)
        values = ["pk", "name"]
        if qs.model == Universe:
            values.append("designation")
        qs = qs.values(*values)

        # Add null if it exists
        if self.does_m2m_null_exist(comic_qs, rel):
            iterables.append(_NULL_NAMED_ROW_ITERABLE)

        iterables.append(qs)
        return chain.from_iterable(iterables)

    def _get_field_name(self):
        field_name = self.kwargs.get("field_name", "")
        return snakecase(field_name)

    @override
    def get_object(self) -> dict[str, Any]:  # pyright: ignore[reportIncompatibleMethodOverride], # ty: ignore[invalid-method-override]
        """Return choices with more than one choice."""
        qs = super().get_object()
        field_name = self._get_field_name()
        rel, m2m_model = self.get_rel_and_model(field_name)
        if m2m_model:
            choices = self._get_m2m_field_choices(m2m_model, qs, rel)
        else:
            choices = self.get_field_choices_query(qs, field_name)
            choices = choices.values_list(field_name, flat=True)
            if field_name in ("critical_rating", "file_type"):
                choices = tuple({"pk": choice, "name": choice} for choice in choices)
        return {
            "field_name": field_name,
            "choices": choices,
        }
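
# Editor sketch (assumed data) of the null-choice handling above: if some
# filtered comics have no tags at all, the m2m choices for "tags" are chained
# with the vuetify null sentinel row first:
#
#     [{"pk": VUETIFY_NULL_CODE, "name": DUMMY_NULL_NAME},
#      {"pk": 3, "name": "action"}, {"pk": 7, "name": "scifi"}]
#
# so the filter dialog can offer "None" alongside real values.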


================================================
FILE: codex/views/browser/const.py
================================================
"""Browser consts."""

BROWSER_FILTER_KEYS = (
    "age_rating",
    "characters",
    "country",
    "credits",
    "critical_rating",
    "decade",
    "file_type",
    "genres",
    "identifier_source",
    "language",
    "locations",
    "monochrome",
    "original_format",
    "reading_direction",
    "series_groups",
    "stories",
    "story_arcs",
    "tagger",
    "tags",
    "teams",
    "universes",
    "year",
)



================================================
FILE: codex/views/browser/cover.py
================================================
"""Comic cover thumbnail view."""

from collections.abc import Sequence
from typing import Any, override

from django.db import OperationalError
from django.db.models.query_utils import Q
from django.http import HttpResponse
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema
from loguru import logger
from rest_framework.renderers import BaseRenderer

from codex.librarian.covers.create import CoverCreateThread
from codex.librarian.covers.path import CoverPathMixin
from codex.librarian.mp_queue import LIBRARIAN_QUEUE
from codex.models import Comic, Volume
from codex.models.groups import Folder
from codex.models.paths import CustomCover
from codex.serializers.browser.settings import BrowserCoverInputSerializer
from codex.views.browser.annotate.order import BrowserAnnotateOrderView
from codex.views.const import (
    CUSTOM_COVER_GROUP_RELATION,
    GROUP_RELATION,
    MISSING_COVER_FN,
    MISSING_COVER_NAME_MAP,
    STATIC_IMG_PATH,
)


class WEBPRenderer(BaseRenderer):
    """Render WEBP images."""

    media_type = "image/webp"
    format = "webp"
    charset: str | None = None
    render_style = "binary"

    @override
    def render(self, data, *_args, **_kwargs) -> Any:
        """Return raw data."""
        return data


class CoverView(BrowserAnnotateOrderView):
    """ComicCover View."""

    input_serializer_class: type[BrowserCoverInputSerializer] = (  # pyright: ignore[reportIncompatibleVariableOverride]
        BrowserCoverInputSerializer
    )
    renderer_classes: Sequence[type[BaseRenderer]] = (WEBPRenderer,)
    content_type = "image/webp"
    TARGET: str = "cover"

    @override
    def get_group_filter(self, group=None, pks=None, *, page_mtime=False) -> Q:
        """Get group filter for First Cover View."""
        if self.params.get("dynamic_covers") or self.model in (Volume, Folder):
            return super().get_group_filter(group=group, pks=pks, page_mtime=page_mtime)
        # First cover group filter relies on sort names to look outside the
        # browser supplied pks, for multi_groups not in the browser query.
        pks = self.kwargs["pks"]
        if not self.model:
            qs = Comic.objects.none()
        else:
            qs = self.model.objects.filter(pk__in=pks)
        sort_names = qs.values_list("sort_name", flat=True).distinct()
        model_rel = GROUP_RELATION[self.model_group]
        group_filter = {f"{model_rel}__sort_name__in": sort_names}
        parent_route = self.params.get("parent_route", {})
        if parent_pks := parent_route.get("pks"):
            parent_rel = GROUP_RELATION[parent_route["group"]]
            group_filter[f"{parent_rel}__pk__in"] = parent_pks
        return Q(**group_filter)

    def _get_comic_cover(self) -> tuple:
        pks = self.kwargs["pks"]
        return pks[0], False

    def _get_custom_cover(self) -> CustomCover | None:
        """Get Custom Cover."""
        if self.model is Volume or not self.params.get("custom_covers"):
            return None
        group = self.kwargs["group"]
        group_rel = CUSTOM_COVER_GROUP_RELATION[group]
        pks = self.kwargs["pks"]
        comic_filter = {f"{group_rel}__in": pks}
        qs = CustomCover.objects.filter(**comic_filter)
        qs = qs.only("pk")
        return qs.first()

    def _get_dynamic_cover(self) -> tuple:
        """Get dynamic cover."""
        comic_qs = self.get_filtered_queryset(Comic)
        comic_qs = self.annotate_order_aggregates(comic_qs)
        comic_qs = self.add_order_by(comic_qs)
        comic_qs = comic_qs.only("pk")
        comic = comic_qs.first()
        cover_pk = comic.pk if comic else 0
        return cover_pk, False

    def _get_cover_pk(self) -> tuple[int, bool]:
        """Get Cover Pk queryset for comic queryset."""
        if self.model is Comic:
            cover_pk, custom = self._get_comic_cover()
        elif custom_cover := self._get_custom_cover():
            cover_pk = custom_cover.pk
            custom = True
        else:
            cover_pk, custom = self._get_dynamic_cover()
        return cover_pk, custom

    def _get_missing_cover_path(self) -> tuple:
        """Get the missing cover, which is a default svg if fetched for a group."""
        group: str = self.kwargs["group"]
        cover_name = MISSING_COVER_NAME_MAP.get(group)
        if cover_name:
            cover_fn = cover_name + ".svg"
            content_type = "image/svg+xml"
        else:
            cover_fn = MISSING_COVER_FN
            content_type = "image/webp"
        cover_path = STATIC_IMG_PATH / cover_fn
        return cover_path, content_type

    def _get_cover_data(self, pk, *, custom: bool) -> tuple:
        thumb_buffer = False
        content_type = "image/webp"

        cover_path = CoverPathMixin.get_cover_path(pk, custom=custom)
        if not cover_path.exists():
            thumb_buffer = CoverCreateThread.create_cover_from_path(
                pk, str(cover_path), logger, LIBRARIAN_QUEUE, custom=custom
            )
            if not thumb_buffer:
                cover_path, content_type = self._get_missing_cover_path()
        elif cover_path.stat().st_size == 0:
            cover_path, content_type = self._get_missing_cover_path()
        return thumb_buffer, cover_path, content_type

    @extend_schema(
        parameters=[BrowserAnnotateOrderView.input_serializer_class],
        responses={(200, content_type): OpenApiTypes.BINARY},
    )
    def get(self, *args, **kwargs) -> HttpResponse:
        """Get comic cover."""
        try:
            try:
                pk, custom = self._get_cover_pk()
            except OperationalError as exc:
                self._handle_operational_error(exc)
                pk = 0
                custom = False
            cover_buffer, cover_path, content_type = self._get_cover_data(
                pk, custom=custom
            )
            if not cover_buffer:
                cover_buffer = cover_path.read_bytes()
            return HttpResponse(cover_buffer, content_type=content_type)
        except Exception:
            logger.exception("Get cover")
            raise



================================================
FILE: codex/views/browser/download.py
================================================
"""Download a group of comics in a zipfile."""

from typing import override

from django.http.response import FileResponse, Http404
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema
from loguru import logger
from zipstream import ZipStream

from codex.views.browser.filters.filter import BrowserFilterView


class GroupDownloadView(BrowserFilterView):
    """Return a group of comic archives as a streaming zipfile."""

    content_type = "application/zip"

    AS_ATTACHMENT = True
    TARGET: str = "download"

    @override
    def get_object(self) -> tuple[str, ...]:
        """Get comic paths for a browse group."""
        group = self.kwargs.get("group")
        pks = self.kwargs.get("pks")
        if not self.model:
            reason = f"Could not find model for group {group}"
            raise Http404(reason)
        try:
            qs = self.get_filtered_queryset(self.model, group=group, pks=pks)
            path_rel = self.rel_prefix + "path"
            paths = qs.values_list(path_rel, flat=True)
        except Exception as exc:
            logger.warning(f"Error with download query for {group}:{pks} {exc}")
            raise
        if not paths:
            reason = f"Comics from {group}:{pks} not found."
            raise Http404(reason)
        return tuple(sorted(set(paths)))

    @extend_schema(responses={(200, content_type): OpenApiTypes.BINARY})
    def get(self, *_args, **kwargs) -> FileResponse:
        """Stream a zip archive of many comics."""
        paths = self.get_object()
        zs = ZipStream(sized=True)
        for path in paths:
            zs.add_path(path)
        download_file = zs
        filename = kwargs.get("filename")
        if not filename:
            pks = self.kwargs.get("pks")
            name = self.model.__name__ if self.model else "No Model"
            filename = f"{name} {pks} Comics.zip"
        headers = {"Content-Length": len(zs), "Last-Modified": zs.last_modified}
        return FileResponse(
            download_file,
            as_attachment=self.AS_ATTACHMENT,
            content_type=self.content_type,
            filename=filename,
            headers=headers,
        )



================================================
FILE: codex/views/browser/filters/__init__.py
================================================
"""Browser filter mixins."""



================================================
FILE: codex/views/browser/filters/bookmark.py
================================================
"""Bookmark filter view methods."""

from django.db.models import Q

from codex.views.bookmark import BookmarkFilterMixin
from codex.views.browser.validate import BrowserValidateView


class BrowserFilterBookmarkView(BookmarkFilterMixin, BrowserValidateView):
    """BookmarkFilter view methods."""

    def __init__(self, *args, **kwargs) -> None:
        """Initialize Bookmark Filter."""
        self.init_bookmark_filter()
        super().__init__(*args, **kwargs)

    def get_bookmark_filter(self, model):
        """Build bookmark query."""
        choice: str = self.params.get("filters", {}).get("bookmark", "")
        if choice:
            bm_rel = self.get_bm_rel(model)
            my_bookmark_filter = self.get_my_bookmark_filter(bm_rel)
            if choice == "READ":
                bookmark_filter = my_bookmark_filter & Q(
                    **{f"{bm_rel}__finished": True}
                )
            else:
                my_not_finished_filter = my_bookmark_filter & Q(
                    **{f"{bm_rel}__finished__in": (False, None)}
                )
                if choice == "UNREAD":
                    bookmark_filter = Q(**{bm_rel: None}) | my_not_finished_filter
                else:  # IN_PROGRESS
                    bookmark_filter = my_not_finished_filter & Q(
                        **{f"{bm_rel}__page__gt": 0}
                    )
        else:
            bookmark_filter = Q()
        return bookmark_filter
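
# Editor sketch of the three bookmark filter choices above, for a Comic
# queryset where bm_rel == "bookmark" (assumed):
#
#     READ        -> mine & Q(bookmark__finished=True)
#     UNREAD      -> Q(bookmark=None) | (mine & finished in (False, None))
#     IN_PROGRESS -> (mine & finished in (False, None)) & Q(bookmark__page__gt=0)
#
# where "mine" scopes the bookmark row to the current user or session.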


================================================
FILE: codex/views/browser/filters/field.py
================================================
"""Comic field filters."""

from types import MappingProxyType

from django.db.models.query_utils import Q

from codex.views.browser.const import BROWSER_FILTER_KEYS
from codex.views.browser.filters.group import GroupFilterView
from codex.views.settings import (
    CREDIT_PERSON_UI_FIELD,
    IDENTIFIER_TYPE_UI_FIELD,
    STORY_ARC_UI_FIELD,
)

_FILTER_REL_MAP = MappingProxyType(
    {
        CREDIT_PERSON_UI_FIELD: "credits__person",
        STORY_ARC_UI_FIELD: "story_arc_numbers__story_arc",
        IDENTIFIER_TYPE_UI_FIELD: "identifiers__source",
    }
)
_FILTER_ATTRIBUTES: frozenset[str] = frozenset(BROWSER_FILTER_KEYS)


class ComicFieldFilterView(GroupFilterView):
    """Comic field filters."""

    @staticmethod
    def _filter_by_comic_field(field, rel_prefix, filter_list) -> Q:
        """Filter by a comic many2many attribute."""
        filter_query = Q()
        if not filter_list:
            return filter_query
        rel = rel_prefix + _FILTER_REL_MAP.get(field, field)
        for index, val in enumerate(filter_list):
            # None values in a list don't work right so test for them separately
            if val is None:
                del filter_list[index]
                filter_query |= Q(**{f"{rel}__isnull": True})
        if filter_list:
            filter_query |= Q(**{f"{rel}__in": filter_list})
        return filter_query

    @classmethod
    def get_all_comic_field_filters(cls, rel_prefix, filters) -> Q:
        """Get all comic field filters for rel_prefix."""
        comic_field_filter = Q()
        for field in _FILTER_ATTRIBUTES:
            filter_list = filters.get(field, [])
            comic_field_filter &= cls._filter_by_comic_field(
                field, rel_prefix, filter_list
            )
        return comic_field_filter

    def get_comic_field_filter(self, model) -> Q:
        """Filter the comics based on the form filters."""
        rel_prefix = self.get_rel_prefix(model)
        filters = self.params["filters"]
        return self.get_all_comic_field_filters(rel_prefix, filters)



================================================
FILE: codex/views/browser/filters/filter.py
================================================
"""Browser Filters."""

from django.db.models.query import QuerySet
from django.db.models.query_utils import Q

from codex.models.comic import Comic
from codex.views.browser.filters.bookmark import BrowserFilterBookmarkView


class BrowserFilterView(BrowserFilterBookmarkView):
    """Browser Filters."""

    def force_inner_joins(self, qs):
        """Force INNER JOINS to filter empty groups."""
        demote_tables = {"codex_library"}
        if qs.model is not Comic:
            demote_tables.add("codex_comic")
        if self.fts_mode:
            # Forcing INNER JOINS required to make fts5 work
            demote_tables.add("codex_comicfts")
        return qs.demote_joins(demote_tables)

    def _get_query_filters(
        self,
        model,
        page_mtime,
        bookmark_filter,
        group=None,
        pks=None,
    ) -> Q:
        """Return all the filters except the group filter."""
        big_include_filter = Q()
        big_exclude_filter = Q()
        big_include_filter &= self.get_group_acl_filter(model, self.request.user)
        big_include_filter &= self.get_group_filter(group, pks, page_mtime=page_mtime)
        big_include_filter &= self.get_comic_field_filter(model)
        if bookmark_filter:
            big_include_filter &= self.get_bookmark_filter(model)
        include_search_filter, exclude_search_filter, fts_q = self.get_search_filters(
            model
        )
        big_include_filter &= include_search_filter
        big_exclude_filter &= exclude_search_filter
        return big_include_filter & ~big_exclude_filter & fts_q

    def get_filtered_queryset(
        self,
        model,
        group=None,
        pks=None,
        *,
        page_mtime=False,
        bookmark_filter=True,
    ) -> QuerySet:
        """Get a filtered queryset for the model."""
        query_filters = self._get_query_filters(
            model,
            page_mtime=page_mtime,
            bookmark_filter=bookmark_filter,
            group=group,
            pks=pks,
        )
        # Distinct necessary for folder view with search
        return model.objects.filter(query_filters).distinct()



================================================
FILE: codex/views/browser/filters/group.py
================================================
"""Group Filters."""

from django.db.models.query_utils import Q

from codex.views.browser.params import BrowserParamsView
from codex.views.const import (
    FILTER_ONLY_GROUP_RELATION,
    FOLDER_GROUP,
    GROUP_RELATION,
)

_GROUP_REL_TARGETS = frozenset({"cover", "choices", "bookmark"})
_PK_REL_TARGETS = frozenset({"metadata", "mtime"})


class GroupFilterView(BrowserParamsView):
    """Group Filters."""

    TARGET: str = ""

    def _get_rel_for_pks(self, group, *, page_mtime: bool):
        """Get the relation from the model to the pks."""
        if self.TARGET in _GROUP_REL_TARGETS:
            rel = FILTER_ONLY_GROUP_RELATION[group]
        elif self.TARGET in _PK_REL_TARGETS or page_mtime:
            # metadata, mtime, browser.page_mtime
            rel = "pk"
        elif self.TARGET == "download":
            rel = "comic__folders" if group == "f" else "pk"
        else:
            # browser.group, opds
            rel = GROUP_RELATION[group]
        rel += "__in"
        return rel

    def get_group_filter(self, group=None, pks=None, *, page_mtime=False) -> Q:
        """Get filter for the displayed group."""
        if group is None:
            group = self.kwargs["group"]
        if pks is None:
            pks = self.kwargs["pks"]
        if pks and 0 not in pks:
            rel = self._get_rel_for_pks(group, page_mtime=page_mtime)
            group_filter_dict = {rel: pks}
        elif group == FOLDER_GROUP and self.TARGET != "choices":
            # Top folder search
            group_filter_dict = {"parent_folder": None}
        else:
            group_filter_dict = {}
        return Q(**group_filter_dict)
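
# Editor sketch of get_group_filter above with assumed kwargs: browsing a
# series page (group="s", pks=(5,)) from the browser target resolves to
#
#     Q(**{GROUP_RELATION["s"] + "__in": (5,)})   # e.g. Q(series__in=(5,))
#
# while the folder root (group="f", pks=()) becomes Q(parent_folder=None).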
specified in search query {field_name}" raise ValueError(reason) many_to_many = rel_class == ManyToManyField rel_class, rel = _parse_field_rel(field_name, rel_class) return rel_class, rel, many_to_many ================================================ FILE: codex/views/browser/filters/search/field/expression.py ================================================ """Parse field lookup right hand side expression.""" import re from datetime import date, datetime from decimal import Decimal from types import MappingProxyType from typing import Any from comicbox.fields.fields import IssueField from dateparser import parse from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import ( BooleanField, CharField, DateField, DateTimeField, TextField, ) from django.db.models.fields import DecimalField, PositiveSmallIntegerField from codex.settings import FALSY _QUOTES_RE = re.compile(r"[\"']") _OP_MAP = MappingProxyType({">": "gt", ">=": "gte", "<": "lt", "<=": "lte"}) _RANGE_RE = re.compile(r"\.{2,}") _PARSE_ISSUE_MATCHER = re.compile(r"(?P\d*\.?\d*)(?P.*)") _LIKE_QUERY_VALUE = re.compile(r"\S\*+\S") _ICONTAINS_QUERY_VALUE = re.compile(r"^(\*.*\*|[^*].*[^*]|^\**$)$") _IENDSWITH_QEURY_VALUE = re.compile(r"^\*") _ISTARTSWITH_QEURY_VALUE = re.compile(r"\*$") _SIZE_UNITS = {"b": 1, "kb": 1024, "mb": 1024**2, "gb": 1024**3, "tb": 1024**4} def parse_size(s: str) -> int: """Parse human friendly byte sizes.""" s = s.strip().lower() for suffix, multiplier in _SIZE_UNITS.items(): if s.endswith(suffix): return int(float(s[: -len(suffix)].strip()) * multiplier) return int(s) def _parse_issue_value(value) -> tuple | tuple[None, None]: """Parse a compound issue value into number & suffix.""" value = IssueField.parse_issue(value) if not value: return None, None matches = _PARSE_ISSUE_MATCHER.match(value) if not matches: return None, None numeric_value = Decimal(matches.group("issue_number")) suffix_value = matches.group("issue_suffix") return numeric_value, suffix_value def _parse_issue_values(rel, value, to_value=None) -> dict: """Issue is not a column. Convert to issue_number and issue_suffix.""" numeric_value, suffix_value = _parse_issue_value(value) if to_value is not None: to_numeric_value, to_suffix_value = _parse_issue_value(to_value) else: to_numeric_value = to_suffix_value = None q_dict = {} if numeric_value is not None: issue_number_field = rel.replace("issue", "issue_number") if to_numeric_value is not None: numeric_value = (numeric_value, to_numeric_value) q_dict[issue_number_field] = numeric_value if suffix_value is not None: issue_suffix_field = rel.replace("issue", "issue_suffix") if to_suffix_value is not None: suffix_value = (suffix_value, to_suffix_value) q_dict[issue_suffix_field] = suffix_value return q_dict def _cast_value(rel, rel_class, value) -> int | Decimal | bool | date | datetime | None: """Cast values by relation class.""" if rel.startswith("size"): value = parse_size(value) elif rel_class == PositiveSmallIntegerField: value = int(value) elif rel_class == DecimalField: value = Decimal(value) elif rel_class == BooleanField: value = value not in FALSY elif rel_class in (DateField, DateTimeField): value = parse(value) if rel_class == DateField and value: value = value.date() # (CharField, TextField): return value def _glob_to_lookup(value) -> tuple[str, str]: """Transform globs into django orm lookups.""" rel_suffix = "" value = value.strip('"').strip("'") if _LIKE_QUERY_VALUE.search(value): # Django doesn't have a builtin interior LIKE operator. 
        # Escape LIKE reserved chars
        value = BaseDatabaseOperations(None).prep_for_like_query(value)
        value = value.replace("*", "%")
        rel_suffix = "__like"
    elif _ICONTAINS_QUERY_VALUE.search(value):
        rel_suffix = "__icontains"
        value = value.replace("*", "")
    elif _IENDSWITH_QUERY_VALUE.search(value):
        rel_suffix = "__iendswith"
        value = value.replace("*", "")
    elif _ISTARTSWITH_QUERY_VALUE.search(value):
        # This never happens because full text search hijacks it early.
        rel_suffix = "__istartswith"
        value = value.replace("*", "")
    return value, rel_suffix


def _parse_operator_numeric(rel, rel_class, value) -> dict[Any, int | None] | dict:
    value = _cast_value(rel, rel_class, value)
    if value is None:
        return {}
    return {rel: value}


def _parse_operator_text(rel, exp) -> dict[Any, str] | dict:
    """Parse text value operators."""
    if rel == "issue":
        return _parse_issue_values(rel, exp)
    value, rel_suffix = _glob_to_lookup(exp)
    rel += rel_suffix
    return {rel: value}


def _parse_operator(operator, rel, rel_class, exp) -> dict:
    """Move value operator out of value into relation operator."""
    lookup = _OP_MAP[operator]
    span_rel = f"{rel}__{lookup}" if operator else rel
    value = exp[len(operator) :]
    if rel == "issue":
        return _parse_issue_values(span_rel, value)
    return _parse_operator_numeric(span_rel, rel_class, value)


def _parse_operator_range(rel, rel_class, value) -> dict:
    """Parse range operator."""
    range_from_value, range_to_value = _RANGE_RE.split(value, 1)
    rel = f"{rel}__range"
    if rel == ("issue__range"):
        return _parse_issue_values(
            rel,
            range_from_value,
            range_to_value,
        )
    range_value = (
        _cast_value(rel, rel_class, range_from_value),
        _cast_value(rel, rel_class, range_to_value),
    )
    return {rel: range_value}


def parse_expression(rel, rel_class, exp) -> dict:
    """Parse the operators of the value side of the field query."""
    exp = _QUOTES_RE.sub("", exp)
    for op in _OP_MAP:
        if exp.startswith(op):
            q_dict = _parse_operator(
                op,
                rel,
                rel_class,
                exp,
            )
            break
    else:
        if ".." in exp:
            q_dict = _parse_operator_range(rel, rel_class, exp)
        elif (issubclass(rel_class, CharField | TextField)) and not rel.startswith(
            "volume"
        ):
            q_dict = _parse_operator_text(rel, exp)
        else:
            q_dict = _parse_operator_numeric(rel, rel_class, exp)
    return q_dict
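
# Worked examples for parse_expression above (editor sketch; outputs traced
# from the code paths, field types assumed):
#
#     parse_expression("size", PositiveSmallIntegerField, ">10mb")
#     # -> {"size__gt": 10485760}
#     parse_expression("name", CharField, "huntress*")
#     # -> {"name__istartswith": "huntress"}  (unless FTS hijacks it earlier)
#     parse_expression("year", PositiveSmallIntegerField, "1999..2001")
#     # -> {"year__range": (1999, 2001)}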
if new_q.connector == Q.AND: for child in new_q.children: if isinstance(child, Q) and child.negated: child.negated = False q = cls._combine_q(Q(), child, new_q.connector) exclude_q_list.append(q) else: q = cls._combine_q(Q(), child, new_q.connector) filter_q_list.append(q) else: filter_q_list[0] = cls._combine_q(filter_q_list[0], new_q, new_q.connector) def _parse_field_query( self, col: str, exp: str, model: type[BaseModel], filter_q_list: list[Q], exclude_q_list: list[Q], ) -> None: try: rel_class, rel, many_to_many = parse_field(col) if q := get_field_query( rel, rel_class, exp, model, many_to_many=many_to_many ): self._hoist_filters(filter_q_list, exclude_q_list, q) except Exception as exc: token = f"{col}:{exp}" msg = f"Parsing field query {token} - {exc}" logger.warning(msg) self.search_error = msg def _parse_compound_field_query( self, cols: tuple[str, ...], exp: str, model: type[BaseModel], filter_q_list: list[Q], exclude_q_list: list[Q], ) -> None: """Parse a compound alias (e.g. protagonist) into an OR'd Q across columns.""" compound_q = Q() for col in cols: try: rel_class, rel, many_to_many = parse_field(col) if q := get_field_query( rel, rel_class, exp, model, many_to_many=many_to_many ): compound_q |= q except Exception as exc: token = f"{col}:{exp}" msg = f"Parsing compound field query {token} - {exc}" logger.warning(msg) self.search_error = msg if compound_q: self._hoist_filters(filter_q_list, exclude_q_list, compound_q) def get_search_field_filters(self, model, field_token_pairs) -> tuple[list, list]: """Parse and apply field query filters.""" filter_q_list = [] exclude_q_list = [] if not field_token_pairs: return filter_q_list, exclude_q_list filter_q_list.append(Q()) exclude_q_list.append(Q()) for col, exp in field_token_pairs: if isinstance(col, tuple): self._parse_compound_field_query( col, exp, model, filter_q_list, exclude_q_list ) else: self._parse_field_query(col, exp, model, filter_q_list, exclude_q_list) return filter_q_list, exclude_q_list ================================================ FILE: codex/views/browser/filters/search/field/optimize.py ================================================ """Optimize Text Lookups.""" # UNUSED CODE multiple like queries test better import re from django.db.models.query_utils import Q # Just an unlikely character _PERCENT_PLACEHOLDER = "Ɍ" def _like_to_regex(like): """Transform a glob into a safe regex for sqlite3.""" # unescape like like = like.replace(r"\%", _PERCENT_PLACEHOLDER) like = like.replace(r"\_", "_") while like[0] == "%" and like[-1] == "%": like = like[1:-1] parts = like prefix = suffix = "" if parts[0] == "%": parts = parts[1:] suffix = "$" elif parts[-1] == "%": parts = parts[:-1] prefix = "^" if not parts: return parts regex = parts.replace(_PERCENT_PLACEHOLDER, "%") regex = re.escape(regex) regex = regex.replace("%", ".*") return prefix + regex + suffix def _regex_like(regex, lookahead): """Bracket a regex with any characters if positive lookahead.""" if lookahead == "=": regex = ".*" + regex + ".*" return regex def like_qs_to_regex_q(q: Q, regex_op: str, *, many_to_many: bool) -> Q: """Optimize a tree of like lookup qs to one regex q.""" regexes = [] # Optimize like equations into regexes rel = "" if many_to_many: regex_op = "|" lookahead = "!" 
if q.negated else ":" if regex_op == "|" else "="
    for child_q in q.children:
        rel, like = child_q
        value = _like_to_regex(like)
        value = _regex_like(value, lookahead)
        regex = rf"(?{lookahead}{value})"
        regexes.append(regex)
    regex_value = regex_op.join(regexes)
    regex_value = _regex_like(regex_value, lookahead)
    rel = rel.replace("like", "iregex")
    return Q(**{rel: regex_value})


================================================
FILE: codex/views/browser/filters/search/field/parse.py
================================================
"""Parse field boolean expressions into Django ORM Queries."""

import re
from typing import Any

from django.db.models import Q
from lark import Lark, Token, Transformer, Tree

from codex.models.base import MAX_NAME_LEN, BaseModel
from codex.models.comic import Comic
from codex.views.browser.filters.search.field.expression import parse_expression

_QUOTES_REXP = r"""(?:\".*?\")"""
_OPERATORS_REXP = "and not|or not|and|or"
_BEGIN_NOT_REXP = r"^\s*\(?\s*(?P<not>not)"
_IMPLICIT_AND_REXP = (
    rf"""{_QUOTES_REXP}|\ (?P<op>{_OPERATORS_REXP})\ |(?P<bare>(?:\ not)?\ )\S"""
)
_BEGIN_NOT_RE = re.compile(_BEGIN_NOT_REXP, flags=re.IGNORECASE)
_IMPLICIT_AND_RE = re.compile(_IMPLICIT_AND_REXP, flags=re.IGNORECASE)
_GRAMMAR = (
    r"""
?start: or_expr

?or_expr: and_expr (OR and_expr)*

?and_expr: not_expr (AND not_expr)*

?not_expr: NOT not_expr -> not_op
         | atom

?atom: "(" or_expr ")"
     | QUOTED
     | WORD

OR: /or/i
AND: /and/i
NOT: /not/i
QUOTED: "\"" /[^"]*/ "\""
WORD: /[^\s()",]{1,"""
    + str(MAX_NAME_LEN)
    + r"""}/

%ignore /\s+/
"""
)
PARSER = Lark(_GRAMMAR, parser="lalr", maybe_placeholders=False)


class FieldQueryTransformer(Transformer):
    """Transform parse tree into Django Q objects."""

    def __init__(
        self,
        rel: str,
        rel_class: type,
        model: type,
        *,
        many_to_many: bool,
    ) -> None:
        """Initialize context."""
        super().__init__()
        self._rel = rel
        self._rel_class = rel_class
        self._model = model
        self._many_to_many = many_to_many

    def _prefix_q_dict(self, q_dict: dict) -> dict:
        """Add or subtract relation prefixes to q_dict for the model."""
        prefix = "" if self._model == Comic else "comic__"
        model_span = self._model.__name__.lower() + "__"
        prefixed_q_dict = {}
        for parsed_rel, value in q_dict.items():
            prefixed_rel = (
                parsed_rel.removeprefix(model_span)
                if parsed_rel.startswith(model_span)
                else prefix + parsed_rel
            )
            prefixed_q_dict[prefixed_rel] = value
        return prefixed_q_dict

    def _make_operand_q(self, token: Token) -> Q:
        """Construct Django ORM Query from a leaf operand value."""
        if (q_dict := parse_expression(self._rel, self._rel_class, str(token))) and (
            prefixed_q_dict := self._prefix_q_dict(q_dict)
        ):
            return Q(**prefixed_q_dict)
        return Q()

    def QUOTED(self, token: Token) -> Q:  # noqa: N802
        """Handle quoted string operand."""
        return self._make_operand_q(token)

    def WORD(self, token: Token) -> Q:  # noqa: N802
        """Handle bare word operand."""
        return self._make_operand_q(token)

    def not_op(self, args: list[Any]) -> Q:
        """Negate the child query."""
        return ~args[0]

    def or_expr(self, args: list[Any]) -> Q:
        """Combine children with OR."""
        q = Q()
        for arg in args:
            if isinstance(arg, Q):
                q |= arg
        return q

    def and_expr(self, args: list[Any]) -> Q:
        """Combine children with AND."""
        q = Q()
        for arg in args:
            if isinstance(arg, Q):
                q &= arg
        return q


def get_field_query(
    rel: str,
    rel_class: type,
    exp: str,
    model: type[BaseModel],
    *,
    many_to_many: bool,
) -> Q:
    """Convert rel and text expression into queries."""
    # Allow negative column search
    begin_not_match = _BEGIN_NOT_RE.search(exp)
    if begin_not_match:
        start = begin_not_match.start("not")
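        # Sketch (illustrative): a leading negation like "not foo" becomes
        # '"" and not foo', giving the not operator a left-hand operand
        # before the implicit-and rewrite below runs.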
        exp = exp[:start] + '"" and ' + exp[start:]
    # Add implicit and for the parser
    exp = _IMPLICIT_AND_RE.sub(
        lambda m: f" and{m.group(0)}" if m.group("bare") else m.group(0), exp
    )
    tree: Tree = PARSER.parse(exp)
    transformer = FieldQueryTransformer(
        rel, rel_class, model, many_to_many=many_to_many
    )
    result: Q = transformer.transform(tree)
    return result


================================================
FILE: codex/views/browser/filters/search/fts.py
================================================
"""Search Filters Methods."""

from loguru import logger

from codex.views.browser.filters.search.field.filter import BrowserFieldQueryFilter


class BrowserFTSFilter(BrowserFieldQueryFilter):
    """Search Filters Methods."""

    def get_fts_filter(self, model, text) -> dict:
        """Perform the search and return the scores as a dict."""
        fts_filter = {}
        try:
            if text:
                rel = self.get_rel_prefix(model)
                # Custom lookup defined in codex.models
                rel += "comicfts__match"
                fts_filter[rel] = text
        except Exception:
            logger.exception("Getting Full Text Search Filter.")
            self.search_error = "Error creating full text search filter"
        return fts_filter


================================================
FILE: codex/views/browser/filters/search/parse.py
================================================
"""Search Filters Methods."""

import re
from types import MappingProxyType

from django.db.models.query_utils import Q
from loguru import logger

from codex.choices.admin import AdminFlagChoices
from codex.choices.search import FIELDMAP
from codex.models import AdminFlag
from codex.models.comic import ComicFTS
from codex.settings import BROWSER_MAX_OBJ_PER_PAGE
from codex.views.browser.filters.search.fts import BrowserFTSFilter

_FTS_COLUMNS = frozenset(
    {field.name for field in ComicFTS._meta.get_fields()}
    - {"comic", "updated_at", "created_at"}
)
_NON_FTS_COLUMNS = frozenset(
    {
        "created_at",
        "critical_rating",
        "date",
        "day",
        "decade",
        "file_type",
        "identifiers",
        "issue",
        "issue_number",
        "issue_suffix",
        "main_character",
        "main_team",
        "month",
        "monochrome",
        "notes",
        "path",
        "page_count",
        "reading_direction",
        "size",
        "updated_at",
        "volume",
        "volume_to",
        "year",
    }
)
_VALID_COLUMNS = frozenset(_FTS_COLUMNS | _NON_FTS_COLUMNS)
_QUOTES_REXP = r"\".*?\""
_COLUMN_EXPRESSION_OPERATORS_REXP = (
    rf"(?:{_QUOTES_REXP})|(?P<star>\B[\*\<\>]\w|\.{{2,}}|\w\*\w)"
)
_COLUMN_EXPRESSION_OPERATORS_RE = re.compile(_COLUMN_EXPRESSION_OPERATORS_REXP)
_FTS_OPERATORS = frozenset({"and", "not", "or", "near"})
_FTS_OPERATOR_REXP = rf"(?P<operator>\b(?:{'|'.join(_FTS_OPERATORS)})\b)"
_FTS_OPERATOR_RE = re.compile(_FTS_OPERATOR_REXP, flags=re.IGNORECASE)
_MULTI_COL_REXP = r"(?P<multi_col>\{.*?\})"
_SINGLE_COL_REXP = r"(?P<col>[a-z_]+)"
_EXP_REXP = rf"\s*(?P<exp>\(.*?\)|{_QUOTES_REXP}|\S+)"
_COL_REXP = rf"({_MULTI_COL_REXP}|{_SINGLE_COL_REXP}):{_EXP_REXP}"
_TOKEN_PRE_OP_REXP = r"(?:(?P<preop>and|or|not)\s+)?"  # noqa: S105
_TOKEN_REXP = rf"(?P<token>{_TOKEN_PRE_OP_REXP}{_COL_REXP}|\S+)"
_TOKEN_RE = re.compile(_TOKEN_REXP, flags=re.IGNORECASE)
_ALIAS_FIELD_MAP = MappingProxyType(
    {value: key for key, values in FIELDMAP.items() for value in values}
)
# Compound aliases expand into OR queries across multiple columns.
# Individual aliases (protag, lead) are resolved to "protagonist" by
# _ALIAS_FIELD_MAP via FIELDMAP before reaching this map.
_COMPOUND_COLUMN_MAP = MappingProxyType(
    {
        "protagonist": ("main_character", "main_team"),
    }
)


class SearchFilterView(BrowserFTSFilter):
    """Search Query Parser."""

    ADMIN_FLAGS: tuple[AdminFlagChoices, ...]
= (AdminFlagChoices.FOLDER_VIEW,) def __init__(self, *args, **kwargs) -> None: """Initialize search variables.""" super().__init__(*args, **kwargs) self._admin_flags: MappingProxyType[str, bool] | None = None self.fts_mode = False self.search_mode = False self.search_error = "" @property def admin_flags(self) -> MappingProxyType[str, bool]: """Set browser relevant admin flags.""" if self._admin_flags is None: if self.ADMIN_FLAGS: admin_pairs = AdminFlag.objects.filter( key__in=(enum.value for enum in self.ADMIN_FLAGS) ).values_list("key", "on") else: admin_pairs = () admin_flags = {} for key, on in admin_pairs: export_key = AdminFlagChoices(key).name.lower() admin_flags[export_key] = on self._admin_flags = MappingProxyType(admin_flags) return self._admin_flags def _is_path_column_allowed(self) -> bool: """Is path column allowed.""" return self.is_admin or bool(self.admin_flags["folder_view"]) @staticmethod def _is_column_operators_used(exp) -> bool: """Detect column expression operators, but not inside quotes.""" for match in _COLUMN_EXPRESSION_OPERATORS_RE.finditer(exp): if match.group("star"): return True return False def _add_field_token(self, preop, col, exp, field_tokens) -> None: """Add a field token entry with the given preop.""" if not preop: preop = "and" if preop not in field_tokens: field_tokens[preop] = set() field_tokens[preop].add((col, exp)) def _parse_column_match( self, preop, col, exp, field_tokens ) -> bool: # , fts_tokens): col = _ALIAS_FIELD_MAP.get(col.lower(), col.lower()) # Compound aliases expand into OR queries across multiple columns. # Store the tuple of cols as the field_token key; filter.py ORs them. if compound_cols := _COMPOUND_COLUMN_MAP.get(col): self._add_field_token(preop, compound_cols, exp, field_tokens) return True if col not in _VALID_COLUMNS: return True if col == "path" and not self._is_path_column_allowed(): return True if col in _NON_FTS_COLUMNS or self._is_column_operators_used(exp): self._add_field_token(preop, col, exp, field_tokens) return True return False @staticmethod def _add_fts_token(fts_tokens, token) -> None: token = _FTS_OPERATOR_RE.sub(lambda op: op.group("operator").upper(), token) if ":" in token: col, value = token.split(":") col = _ALIAS_FIELD_MAP.get(col.lower(), col) token = f"{col}:{value}" elif token.lower() not in _FTS_OPERATORS and not ( token.startswith('"') and token.endswith('"') ): token = f'"{token}"' fts_tokens.append(token) def _preparse_search_query_token(self, match, field_tokens, fts_tokens) -> None: token = match.group("token") if not token: return multi_col = match.group("multi_col") col = match.group("col") exp = match.group("exp") if exp: exp = exp.strip("'").strip('"') if multi_col or not col or not exp: # I could add multi-col to field groups, but nobody will care. 
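            # e.g. an FTS5 brace token like "{characters teams}:batman"
            # falls through to full text search here (illustrative token).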
self._add_fts_token(fts_tokens, token) return preop = match.group("preop") if not self._parse_column_match(preop, col, exp, field_tokens): if col and exp: token = f"{col}:{exp}" self._add_fts_token(fts_tokens, token) def _preparse_search_query(self) -> tuple[dict, str] | tuple: """Preparse search fields out of query text.""" text = self.params.get("search") field_tokens = {} if not text: return field_tokens, text fts_tokens = [] for match in _TOKEN_RE.finditer(text): try: self._preparse_search_query_token(match, field_tokens, fts_tokens) except Exception as exc: tok = match.group(0) if match else "" logger.debug(f"Error preparsing search query token {tok}: {exc}") self.search_error = "Syntax error" text = " ".join(fts_tokens) return field_tokens, text def _create_search_filters(self, model) -> tuple[list, list, Q]: field_tokens_dict, fts_text = self._preparse_search_query() field_filter_q_list = [] field_exclude_q_list = [] for preop, field_token_pairs in field_tokens_dict.items(): if preop == "not": exclude_q_list, include_q_list = self.get_search_field_filters( model, field_token_pairs ) else: # AND and OR # Cannot do OR queries with MATCH, it decontextualizes MATCH somehow. if preop == "or": self.search_error = "OR preceding column tokens with operator expressions will act as AND" include_q_list, exclude_q_list = self.get_search_field_filters( model, field_token_pairs ) field_filter_q_list += include_q_list field_exclude_q_list += exclude_q_list fts_filter_dict = self.get_fts_filter(model, fts_text) if fts_filter_dict: self.fts_mode = True fts_q = Q(**fts_filter_dict) else: fts_q = Q() return field_filter_q_list, field_exclude_q_list, fts_q def _create_search_filter(self, filter_list) -> Q: """Apply search filter lists. Separate filter clauses are employed for m2m searches.""" combined_q = Q() for q in filter_list: if not q: continue combined_q &= q self.search_mode = True return combined_q def get_search_filters(self, model) -> tuple[Q, Q, Q]: """Preparse search, search and return the filter and scores.""" include_q = Q() exclude_q = Q() fts_q = Q() try: field_filter_q_list, field_exclude_q_list, fts_q = ( self._create_search_filters(model) ) # Apply filters include_q = self._create_search_filter(field_filter_q_list) exclude_q = self._create_search_filter(field_exclude_q_list) except Exception as exc: msg = "Creating search filters" logger.exception(msg) msg = f"{msg} - {exc}" self.search_error = msg return include_q, exclude_q, fts_q def get_search_limit(self) -> int: """Get search scores for choices and metadata.""" if not self.search_mode: return 0 page = self.kwargs.get("page", 1) return page * BROWSER_MAX_OBJ_PER_PAGE + 1 ================================================ FILE: codex/views/browser/group_mtime.py ================================================ """Group Mtime Function.""" from typing import TYPE_CHECKING from django.db.models.aggregates import Aggregate, Max from django.db.models.functions import Greatest from django.db.utils import OperationalError from loguru import logger from codex.models.functions import JsonGroupArray from codex.views.browser.filters.filter import BrowserFilterView from codex.views.const import EPOCH_START, EPOCH_START_DATETIMEFIELD, NONE_DATETIMEFIELD if TYPE_CHECKING: from django.db.models import Q, Value _FTS5_PREFIX = "fts5: " class BrowserGroupMtimeView(BrowserFilterView): """Annotations that also filter.""" def __init__(self, *args, **kwargs) -> None: """Initialize memoized values.""" super().__init__(*args, **kwargs) 
self._is_bookmark_filtered: bool | None = None @property def is_bookmark_filtered(self) -> bool: """Is bookmark filter in effect.""" if self._is_bookmark_filtered is None: self._is_bookmark_filtered = bool( self.params.get("filters", {}).get("bookmark") ) return self._is_bookmark_filtered def _handle_operational_error(self, err) -> None: msg = err.args[0] if err.args else "" if msg.startswith(_FTS5_PREFIX): level = "DEBUG" self.search_error = msg.removeprefix(_FTS5_PREFIX) else: level = "WARNING" msg = str(err) logger.log(level, f"Query Error: {msg}") # logger.exception(f"Query Error: {msg}") debug def get_max_bookmark_updated_at_aggregate( self, model, agg_func: type[Aggregate] = Max, default=NONE_DATETIMEFIELD ) -> Aggregate: """Get filtered maximum bookmark updated_at relation.""" bm_rel = self.get_bm_rel(model) bm_filter = self.get_my_bookmark_filter(bm_rel) bmua_rel = f"{bm_rel}__updated_at" kwargs: dict[str, bool | str | Value | Q] = { "default": default, "filter": bm_filter, } if agg_func is JsonGroupArray: kwargs["distinct"] = True kwargs["order_by"] = bmua_rel return agg_func(bmua_rel, **kwargs) # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type] def get_group_mtime(self, model, group=None, pks=None, *, page_mtime=False): """Get a filtered mtime for browser pages and mtime checker.""" qs = self.get_filtered_queryset( model, group=group, pks=pks, page_mtime=page_mtime, bookmark_filter=self.is_bookmark_filtered, ) # For group models, traverse to Comic.updated_at via rel_prefix. # The group model's own updated_at is not reliably refreshed by # bulk_update / bulk_create(update_conflicts) because auto_now # only fires on Model.save(). prefix = self.get_rel_prefix(model) mua = Max(prefix + "updated_at", default=EPOCH_START_DATETIMEFIELD) mbua = self.get_max_bookmark_updated_at_aggregate( model, default=EPOCH_START_DATETIMEFIELD ) try: qs = qs.annotate(max=Greatest(mua, mbua)) # Forcing inner joins makes search work # Can't run demote_joins on aggregate. 
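            # Note: "max" is Greatest(Max(updated_at), Max(bookmark updated_at));
            # the EPOCH_START defaults keep Greatest from returning NULL when
            # either aggregate has no rows (SQLite MAX(a, b) is NULL if any
            # argument is NULL).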
qs = self.force_inner_joins(qs) first = qs.first() mtime = first.max if first else EPOCH_START except OperationalError as exc: self._handle_operational_error(exc) mtime = None if mtime == NotImplemented: mtime = None return mtime ================================================ FILE: codex/views/browser/metadata/__init__.py ================================================ """Aggregate Group and Comic Metadata View.""" from typing import Any, override from django.db.models import QuerySet, Sum from drf_spectacular.utils import extend_schema from loguru import logger from rest_framework.exceptions import NotFound from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.choices.admin import AdminFlagChoices from codex.serializers.browser.metadata import MetadataSerializer from codex.serializers.browser.settings import BrowserFilterChoicesInputSerializer from codex.views.browser.metadata.const import SUM_FIELDS from codex.views.browser.metadata.copy_intersections import ( MetadataCopyIntersectionsView, ) class MetadataView(MetadataCopyIntersectionsView): """Aggregate Group and Comic Metadata View.""" serializer_class: type[BaseSerializer] | None = MetadataSerializer input_serializer_class: type[BrowserFilterChoicesInputSerializer] = ( # pyright: ignore[reportIncompatibleVariableOverride] BrowserFilterChoicesInputSerializer ) TARGET: str = "metadata" ADMIN_FLAGS = (AdminFlagChoices.FOLDER_VIEW,) @override def _get_valid_browse_nav_groups(self, valid_top_groups) -> tuple: """Limited allowed nav groups for metadata.""" # Overrides method in browser.validate group = self.kwargs["group"] return (group,) def _raise_not_found(self, exc=None) -> None: """Raise an exception if the object is not found.""" pks = self.kwargs["pks"] group = self.kwargs["group"] detail = f"Filtered metadata for {group}/{pks} not found" raise NotFound(detail=detail) from exc def _get_first_object(self, qs: QuerySet): obj = qs[0] if not obj: reason = "Empty obj" raise ValueError(reason) return obj def _aggregate_multi_pk_sums(self, filtered_qs, obj): """ Aggregate sum fields across multiple selected items. When multiple pks are selected for any group model, qs[0] only returns the first item's values. This method computes the correct sums across all selected items using rel_prefix to traverse from the group model to the Comic fields. """ aggs = {} for field in SUM_FIELDS: full_field = self.rel_prefix + field aggs[field] = Sum(full_field) sums = self.force_inner_joins(filtered_qs).aggregate(**aggs) for field, value in sums.items(): if value is not None: setattr(obj, field, value) return obj @override def get_object(self) -> Any: """Create a comic-like object from the current browser group.""" # Comic model goes through the same code path as groups because # values dicts don't copy relations to the serializer. The values # dict is necessary because of the folders view union in browser.py. 
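        # Pipeline sketch (explanatory): filter -> order/card aggregate
        # annotations -> constant annotations for intersecting values ->
        # group_by -> forced inner joins -> first row becomes the object.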
qs = self.get_filtered_queryset(self.model) filtered_qs = qs # Annotate qs = self.annotate_order_aggregates(qs) qs = self.annotate_card_aggregates(qs) qs = self.annotate_values_and_fks(qs, filtered_qs) qs = self.add_group_by(qs) qs = self.force_inner_joins(qs) # Get Object try: obj = self._get_first_object(qs) except (IndexError, ValueError) as exc: return self._raise_not_found(exc) # Aggregate sum fields for multi-pk selections pks = self.kwargs.get("pks", ()) if len(pks) > 1: obj = self._aggregate_multi_pk_sums(filtered_qs, obj) # Hacks to add to object after query groups, fk_intersections, m2m_intersections = self.query_intersections( filtered_qs ) return self.copy_intersections_into_comic_fields( obj, groups, fk_intersections, m2m_intersections ) @extend_schema(parameters=[input_serializer_class]) def get(self, *_args, **_kwargs) -> Response: """Get metadata for a filtered browse group.""" # Init try: obj = self.get_object() serializer = self.get_serializer(obj) return Response(serializer.data) except Exception: logger.exception(f"Getting metadata {self.kwargs}") raise ================================================ FILE: codex/views/browser/metadata/annotate.py ================================================ """Metadata Annotations.""" from django.db.models import Count, IntegerField, Sum, Value from django.db.models.query import QuerySet from codex.models import Comic from codex.views.browser.annotate.card import BrowserAnnotateCardView from codex.views.browser.metadata.const import ( ADMIN_OR_FILE_VIEW_ENABLED_COMIC_VALUE_FIELDS, COMIC_RELATED_VALUE_FIELDS, COMIC_VALUE_FIELD_NAMES, COMIC_VALUE_FIELDS_CONFLICTING, COMIC_VALUE_FIELDS_CONFLICTING_PREFIX, PATH_GROUPS, SUM_FIELDS, ) class MetadataAnnotateView(BrowserAnnotateCardView): """Metadata Annotations.""" def _get_comic_value_fields(self) -> tuple: """Include the path field for staff.""" fields = set(COMIC_VALUE_FIELD_NAMES) group = self.kwargs["group"] if ( not (self.is_admin and self.admin_flags["folder_view"]) or group not in PATH_GROUPS ): fields -= ADMIN_OR_FILE_VIEW_ENABLED_COMIC_VALUE_FIELDS return tuple(fields) @staticmethod def _intersection_annotate_separate_sum_fields( fields, ) -> tuple[tuple[str, ...], tuple[str, ...]]: """Separate sum fields (direct aggregation expressions) from intersection fields (need distinct-count check).""" sum_fields = [] intersection_fields = [] for field in fields: if field in SUM_FIELDS: sum_fields.append(field) else: intersection_fields.append(field) return tuple(sum_fields), tuple(intersection_fields) def _intersection_annotate_count_sum_fields( self, sum_fields: tuple[str, ...], annotation_prefix: str, qs: QuerySet ) -> QuerySet: # Annotate sum fields directly as aggregate expressions. for field in sum_fields: ann_field = annotation_prefix + field.replace("__", "_") full_field = self.rel_prefix + field qs = qs.annotate(**{ann_field: Sum(full_field)}) return qs def _intersection_annotate_count_intersection_fields( self, filtered_qs: QuerySet, intersection_fields: tuple[str, ...] ): """ Batch query 1: Get distinct counts for ALL intersection fields at once. aggregate() collapses all rows into one result without GROUP BY, completely avoiding the group_by/subquery conflict. """ count_base = self.force_inner_joins(filtered_qs) agg_kwargs = {} for field in intersection_fields: full_field = self.rel_prefix + field agg_kwargs[field] = Count(full_field, distinct=True) distinct_counts = count_base.aggregate(**agg_kwargs) # Keep only fields where all comics share exactly one distinct value. 
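        # e.g. with distinct_counts == {"year": 1, "notes": 3} (hypothetical),
        # only "year" survives: every selected comic shares one year, so it
        # can be annotated onto the group row as a constant Value.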
return tuple(f for f in intersection_fields if distinct_counts.get(f) == 1) def _intersection_annotate_fetch_intersecting_values( self, filtered_qs: QuerySet, related_suffix: str, single_value_fields: tuple[str, ...], ) -> tuple[str, ...]: """ Batch query 2: Fetch the shared value for all qualifying fields in a single query. Since each field has exactly 1 distinct value, any row's value is representative. """ value_base = self.force_inner_joins(filtered_qs) full_fields = tuple( self.rel_prefix + f + related_suffix for f in single_value_fields ) return tuple(value_base.values_list(*full_fields).first()) def _intersection_annotate( self, querysets, fields, related_suffix="", annotation_prefix="", ) -> tuple: """ Annotate the intersection of value and fk fields. For each field, check if all comics in the filtered queryset share exactly one distinct value. If so, annotate that value as a constant. Uses aggregate() to batch all distinct-count checks into a single query, then fetches all single-valued fields in one more query. This replaces the previous approach of 2 queries per field. """ (filtered_qs, qs) = querysets sum_fields, intersection_fields = ( self._intersection_annotate_separate_sum_fields(fields) ) qs = self._intersection_annotate_count_sum_fields( sum_fields, annotation_prefix, qs ) if not intersection_fields: return filtered_qs, qs single_value_fields = self._intersection_annotate_count_intersection_fields( filtered_qs, intersection_fields ) if not single_value_fields: return filtered_qs, qs row = self._intersection_annotate_fetch_intersecting_values( filtered_qs, related_suffix, single_value_fields ) if not row: return filtered_qs, qs # Annotate each single-value field as a Value constant. for field, val in zip(single_value_fields, row, strict=True): ann_field = annotation_prefix + field.replace("__", "_") if field.endswith("count"): output_field = IntegerField() else: output_field = Comic._meta.get_field(field) qs = qs.annotate(**{ann_field: Value(val, output_field)}) return filtered_qs, qs def annotate_values_and_fks(self, qs, filtered_qs): """Annotate comic values and comic foreign key values.""" # Simple Values querysets = (filtered_qs, qs) if qs.model is not Comic: comic_value_fields = self._get_comic_value_fields() querysets = self._intersection_annotate(querysets, comic_value_fields) # Conflicting Simple Values querysets = self._intersection_annotate( querysets, COMIC_VALUE_FIELDS_CONFLICTING, annotation_prefix=COMIC_VALUE_FIELDS_CONFLICTING_PREFIX, ) # Foreign Keys with special count values _, qs = self._intersection_annotate( querysets, COMIC_RELATED_VALUE_FIELDS, ) return qs ================================================ FILE: codex/views/browser/metadata/const.py ================================================ """Metadata view consts.""" from types import MappingProxyType from codex.models import Comic from codex.models.groups import Folder, Imprint, Publisher, Series, Volume from codex.models.identifier import Identifier from codex.models.named import Credit, SeriesGroup, StoryArcNumber, Universe ############ # Annotate # ############ COMIC_VALUE_FIELDS_CONFLICTING = frozenset( { "created_at", "name", "path", "updated_at", } ) COMIC_VALUE_FIELDS_CONFLICTING_PREFIX = "conflict_" PATH_GROUPS = frozenset({"c", "f"}) ADMIN_OR_FILE_VIEW_ENABLED_COMIC_VALUE_FIELDS = frozenset({"path"}) _DISABLED_VALUE_FIELD_NAMES = frozenset( {"id", "pk", "sort_name", "stat"} | COMIC_VALUE_FIELDS_CONFLICTING ) COMIC_VALUE_FIELD_NAMES = frozenset( # contains path field.name for field in 
Comic._meta.get_fields() if not field.is_relation and field.name not in _DISABLED_VALUE_FIELD_NAMES ) COMIC_RELATED_VALUE_FIELDS = frozenset({"series__volume_count", "volume__issue_count"}) SUM_FIELDS = frozenset({"page_count", "size"}) ######### # Query # ######### _CREDIT_ONLY = ("role", "person") _CREDIT_PREFETCH = (*_CREDIT_ONLY, "role__identifier", "person__identifier") GROUP_MODELS = MappingProxyType( { "i": (Publisher,), "s": (Publisher, Imprint), "v": (Publisher, Imprint, Series), "c": (Publisher, Imprint, Series, Volume), "f": (Publisher, Imprint, Series, Volume), "a": (Publisher, Imprint, Series, Volume), } ) M2M_QUERY_OPTIMIZERS = MappingProxyType( { Credit: { "prefetch": _CREDIT_PREFETCH, "select": (), "only": _CREDIT_ONLY, }, StoryArcNumber: { "prefetch": ("story_arc", "story_arc__identifier"), "select": (), "only": ("story_arc", "number"), }, Identifier: { "select": ("source",), "only": ("source", "key", "url"), }, Universe: {"only": ("name", "designation", "identifier")}, SeriesGroup: { "select": (), "only": ("name",), }, Folder: { "select": (), "only": ("path",), }, } ) COMIC_MAIN_FIELD_NAME_BACK_REL_MAP = MappingProxyType( { "main_character": "main_character_in_comics", "main_team": "main_team_in_comics", } ) ================================================ FILE: codex/views/browser/metadata/copy_intersections.py ================================================ """Copy Intersections Into Comic Fields.""" from codex.models.comic import Comic from codex.serializers.browser.metadata import PREFETCH_PREFIX from codex.views.browser.metadata.const import ( COMIC_VALUE_FIELDS_CONFLICTING, COMIC_VALUE_FIELDS_CONFLICTING_PREFIX, PATH_GROUPS, ) from codex.views.browser.metadata.query_intersections import ( MetadataQueryIntersectionsView, ) _PREFETCH_DICT_FIELDS = frozenset({"identifiers", "credits", "story_arc_numbers"}) class MetadataCopyIntersectionsView(MetadataQueryIntersectionsView): """Copy Intersections Into Comic Fields.""" def _path_security(self, obj) -> None: """Secure filesystem information for acl situation.""" group = self.kwargs["group"] is_path_group = group in PATH_GROUPS if is_path_group: if self.is_admin: return if self.admin_flags["folder_view"]: obj.path = obj.search_path() else: obj.path = "" def _highlight_current_group(self, obj) -> None: """Values for highlighting the current group.""" if self.model and self.model is not Comic: # move the name of the group to the correct field field = self.model.__name__.lower() + "_list" group_list = self.model.objects.filter(pk__in=obj.ids).values("name") setattr(obj, field, group_list) obj.name = None @classmethod def _copy_m2m_intersections(cls, obj, m2m_intersections) -> None: """Copy the m2m intersections into the object.""" # It might even be faster to copy everything to a dict and not use the obj. for key, qs in m2m_intersections.items(): serializer_key = ( f"{PREFETCH_PREFIX}{key}" if key in _PREFETCH_DICT_FIELDS else key ) if hasattr(obj, serializer_key): # real db fields need to use their special set method. field = getattr(obj, serializer_key) field.set(qs, clear=True) else: # fake db field is just a queryset attached. 
                setattr(obj, serializer_key, qs)

    @staticmethod
    def _copy_groups(obj, groups) -> None:
        for field, group_qs in groups.items():
            setattr(obj, field + "_list", group_qs)

    @staticmethod
    def _copy_fks(obj, fks) -> None:
        for field, fk_qs in fks.items():
            setattr(obj, field, fk_qs.first())

    @staticmethod
    def _copy_conflicting_simple_fields(obj) -> None:
        """Copy conflicting fields over natural fields."""
        for field in COMIC_VALUE_FIELDS_CONFLICTING:
            conflict_field = COMIC_VALUE_FIELDS_CONFLICTING_PREFIX + field
            val = getattr(obj, conflict_field, None)
            setattr(obj, field, val)

    def copy_intersections_into_comic_fields(
        self, obj, groups, fk_intersections, m2m_intersections
    ):
        """Copy a bunch of values that I couldn't fit cleanly in the main queryset."""
        self._path_security(obj)
        self._highlight_current_group(obj)
        self._copy_groups(obj, groups)
        self._copy_fks(obj, fk_intersections)
        self._copy_m2m_intersections(obj, m2m_intersections)
        if self.model is not Comic:
            self._copy_conflicting_simple_fields(obj)
        return obj


================================================
FILE: codex/views/browser/metadata/query_intersections.py
================================================
"""Metadata query fk & m2m intersections."""

from django.db.models import CharField, Count, F, ManyToManyField, Value
from django.db.models.query import QuerySet

from codex.librarian.scribe.importer.const import COMIC_FK_FIELDS, COMIC_M2M_FIELDS
from codex.models import Comic
from codex.models.functions import JsonGroupArray
from codex.models.groups import Volume
from codex.views.browser.metadata.annotate import MetadataAnnotateView
from codex.views.browser.metadata.const import (
    COMIC_MAIN_FIELD_NAME_BACK_REL_MAP,
    GROUP_MODELS,
    M2M_QUERY_OPTIMIZERS,
)
from codex.views.const import METADATA_GROUP_RELATION, MODEL_REL_MAP


class MetadataQueryIntersectionsView(MetadataAnnotateView):
    """Metadata query fk & m2m intersections."""

    def _query_groups(self) -> dict:
        """Query the through models to show group lists."""
        groups = {}
        if not self.model:
            return groups
        group = self.kwargs["group"]
        rel = METADATA_GROUP_RELATION.get(group)
        if not rel:
            return groups
        rel = rel + "__in"
        pks = self.kwargs["pks"]
        group_filter = {rel: pks}
        for model in GROUP_MODELS.get(group, ()):
            field_name = MODEL_REL_MAP[model]
            qs = model.objects.filter(**group_filter)
            only = ["name"]
            if model is Volume:
                only.append("number_to")
            qs = qs.only(*only).distinct()
            qs = qs.group_by(*only)  # pyright: ignore[reportAttributeAccessIssue]
            qs = qs.annotate(ids=JsonGroupArray("id", distinct=True, order_by="id"))
            qs = qs.values("ids", *only)
            groups[field_name] = qs
        return groups

    def _get_comic_pks(self, filtered_qs: QuerySet) -> frozenset[int]:
        pk_field = self.rel_prefix + "pk"
        comic_pks = filtered_qs.distinct().values_list(pk_field, flat=True)
        # In fts mode the join doesn't work for the query.
        # Evaluating it now is probably faster than running the filter for every m2m anyway.
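        # e.g. for a group model the pk_field is "comic__pk" via rel_prefix,
        # so this evaluates to the set of selected comic ids (illustrative;
        # the prefix is empty when the model is Comic itself).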
        return frozenset(comic_pks)

    def _get_fk_intersection_query(
        self, field_name: str, comic_pks: frozenset[int], rel: str
    ):
        """Get intersection query for one field."""
        model = Comic._meta.get_field(field_name).related_model
        if not model:
            reason = f"No model found for comic field: {field_name}"
            raise ValueError(reason)
        rel += "__in"
        intersection_qs = model.objects.filter(**{rel: comic_pks})
        intersection_qs = intersection_qs.alias(count=Count("comic")).filter(
            count=len(comic_pks)
        )
        return intersection_qs.only("name")

    def _query_fk_intersections(self, comic_pks: frozenset[int]) -> dict:
        fk_intersections = {}
        for field in COMIC_FK_FIELDS:
            rel = COMIC_MAIN_FIELD_NAME_BACK_REL_MAP.get(field.name, "comic__pk")
            fk_intersections[field.name] = self._get_fk_intersection_query(
                field.name, comic_pks, rel
            )
        return fk_intersections

    @staticmethod
    def _get_m2m_intersection_query(
        field: ManyToManyField, comic_pks: frozenset[int], num_comics: int
    ) -> QuerySet:
        """Build a through table queryset for a ManyToManyField."""
        through = field.remote_field.through  # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute]
        rel_field_name = field.m2m_reverse_field_name()
        qs = through.objects.filter(comic_id__in=comic_pks)
        qs = qs.values(related_id=F(rel_field_name + "_id"))
        qs = qs.annotate(cnt=Count("comic_id"))
        qs = qs.filter(cnt=num_comics)
        qs = qs.annotate(field_name=Value(field.name, output_field=CharField()))
        return qs.values_list("field_name", "related_id")

    @staticmethod
    def _get_optimized_m2m_query(qs):
        optimizers = M2M_QUERY_OPTIMIZERS.get(qs.model, {})
        if prefetch := optimizers.get("prefetch"):
            qs = qs.prefetch_related(*prefetch)
        if select := optimizers.get("select", ("identifier",)):
            qs = qs.select_related(*select)
        only = optimizers.get("only", ("name", "identifier"))
        return qs.only(*only)

    def _query_m2m_intersections(self, comic_pks: frozenset[int]) -> dict:
        """Query m2m intersections with a single query."""
        num_comics = len(comic_pks)
        # Build one union query across all through tables
        queries = []
        for field in COMIC_M2M_FIELDS:
            qs = self._get_m2m_intersection_query(field, comic_pks, num_comics)
            queries.append(qs)
        combined = queries[0].union(*queries[1:], all=True)
        # Partition results by field name
        pk_map: dict[str, list[int]] = {}
        for field_name, related_id in combined:
            pk_map.setdefault(field_name, []).append(related_id)
        # Hydrate with ORM querysets (preserves select/prefetch optimizers)
        m2m_intersections = {}
        for field in COMIC_M2M_FIELDS:
            pks = pk_map.get(field.name, [])
            qs = field.related_model.objects.filter(pk__in=pks)
            m2m_intersections[field.name] = self._get_optimized_m2m_query(qs)
        return m2m_intersections

    def query_intersections(self, filtered_qs) -> tuple[dict, dict, dict]:
        """Query complex intersections."""
        groups = self._query_groups()
        comic_pks = self._get_comic_pks(filtered_qs)
        fk_intersections = self._query_fk_intersections(comic_pks)
        m2m_intersections = self._query_m2m_intersections(comic_pks)
        return groups, fk_intersections, m2m_intersections


================================================
FILE: codex/views/browser/mtime.py
================================================
"""Get the mtimes for the submitted groups."""

from drf_spectacular.utils import extend_schema
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.models.groups import Publisher
from codex.serializers.browser.mtime import GroupsMtimeSerializer, MtimeSerializer
from codex.util import max_none
from codex.views.browser.group_mtime import
BrowserGroupMtimeView from codex.views.const import GROUP_MODEL_MAP class MtimeView(BrowserGroupMtimeView): """Get the mtimes for the submitted groups.""" input_serializer_class: type[GroupsMtimeSerializer] = GroupsMtimeSerializer # pyright: ignore[reportIncompatibleVariableOverride] serializer_class: type[BaseSerializer] | None = MtimeSerializer TARGET: str = "mtime" def _get_group_mtime(self, item): """Get one group's mtimes.""" group = item["group"] pks = item["pks"] model = GROUP_MODEL_MAP[group] if not model: model = Publisher return self.get_group_mtime(model, group, pks) def get_max_groups_mtime(self): """Get max mtime for all groups.""" max_mtime = None for item in self.params["groups"]: mtime = self._get_group_mtime(item) max_mtime = max_none(max_mtime, mtime) return max_mtime @extend_schema(parameters=[GroupsMtimeSerializer]) def get(self, *args, **kwargs) -> Response: """Get the mtimes for the submitted groups.""" max_mtime = self.get_max_groups_mtime() # Serialize Response result = {"max_mtime": max_mtime} serializer = self.get_serializer(result) return Response(serializer.data) ================================================ FILE: codex/views/browser/order_by.py ================================================ """Base view for ordering the query.""" from codex.models import Comic from codex.models.groups import Volume from codex.views.browser.group_mtime import BrowserGroupMtimeView class BrowserOrderByView(BrowserGroupMtimeView): """Base class for views that need ordering.""" def __init__(self, *args, **kwargs) -> None: """Initialize memoized vars.""" super().__init__(*args, **kwargs) self._order_key: str = "" self._comic_sort_names: tuple[str, ...] = () @property def order_key(self) -> str: """Get the default order key for the view.""" if not self._order_key: order_key: str = self.params["order_by"] if (order_key == "search_score" and not self.fts_mode) or ( (order_key == "filename" and not self.admin_flags["folder_view"]) or (order_key == "child_count" and self.TARGET == "cover") ): order_key = "sort_name" self._order_key = order_key return self._order_key def _add_comic_order_by(self, order_key, comic_sort_names) -> list: """Order by for comics (and covers).""" if not order_key: order_key = self.order_key if order_key == "child_count": order_key = "sort_name" if order_key == "sort_name": if not comic_sort_names: comic_sort_names = self._comic_sort_names order_fields_head = [ *comic_sort_names, "issue_number", "issue_suffix", "collection_title", "sort_name", ] else: # Comic orders on indexed fields directly # Which is allegedly faster than using tmp b-trees (annotations) # And since that's every cover sort, it's worth it. 
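            # e.g. order_key "date" makes order_fields_head ["date"];
            # add_order_by appends the final "pk" tiebreaker (illustrative).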
order_fields_head = [order_key] # Comic order micro optimizations if order_key == "story_arc_number": order_fields_head += ["date"] elif order_key == "bookmark_updated_at": order_fields_head += ["bookmark_updated_at"] return order_fields_head def add_order_by(self, qs, order_key="", comic_sort_names=None): """Create the order_by list.""" if qs.model is Comic: order_fields_head = self._add_comic_order_by(order_key, comic_sort_names) elif qs.model is Volume and order_key == "sort_name": order_fields_head = ["name", "number_to"] else: order_fields_head = ["order_value"] order_fields = (*order_fields_head, "pk") prefix = "-" if self.params.get("order_reverse") else "" if prefix: order_by = [prefix + field for field in order_fields] else: order_by = order_fields return qs.order_by(*order_by) ================================================ FILE: codex/views/browser/page_in_bounds.py ================================================ """Browser Page Bounds Checking.""" from typing import Any from loguru import logger from codex.views.browser.annotate.card import BrowserAnnotateCardView class BrowserPageInBoundsView(BrowserAnnotateCardView): """Browser Page Bounds Checking.""" def _get_back_one_page_route(self, num_pages) -> dict[str, Any]: """Get max page if oob or 1.""" logger.debug("Redirect back one page.") group = self.kwargs.get("group") pks = self.kwargs.get("pks") page = self.kwargs.get("page", 1) new_page = num_pages if num_pages and page > num_pages else 1 pks = pks or (0,) return {"group": group, "pks": pks, "page": new_page} def _get_up_page_redirect(self) -> tuple[dict, None]: """Get a parent route to redirect to when page is out of bounds.""" group = self.kwargs.get("group") if group not in ("f", "a"): group = "r" route_mask = {"group": group, "pks": (), "page": 1} logger.debug("Redirect up a level.") return route_mask, None def _handle_page_out_of_bounds(self, num_pages) -> None: """Handle out of bounds redirect.""" # Try to find a logical page to run to. group = self.kwargs.get("group") page = self.kwargs.get("page", 1) pks = self.kwargs.get("pks") reason = f"{group=} {pks=} {page=} does not exist." # Adjust route mask for redirect if num_pages and page > 1: route_mask = self._get_back_one_page_route(num_pages) settings_mask = None else: # This now only occurs when page < 1 route_mask, settings_mask = self._get_up_page_redirect() self.raise_redirect(reason, route_mask=route_mask, settings_mask=settings_mask) def check_page_in_bounds(self, num_pages: int) -> None: """Redirect page out of bounds.""" page = self.kwargs.get("page", 1) if page == 1 or (page >= 1 and page <= num_pages): # Don't redirect if on the root page for the group. # Or page within valid range. 
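            # Illustrative walkthrough: with num_pages=3, pages 1-3 return
            # here; page 4 redirects back one page, page 0 redirects up a level.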
return self._handle_page_out_of_bounds(num_pages) ================================================ FILE: codex/views/browser/paginate.py ================================================ """Browser pagination.""" from math import ceil from django.core.paginator import EmptyPage, Paginator from django.db.models.query import QuerySet from loguru import logger from codex.settings import BROWSER_MAX_OBJ_PER_PAGE from codex.views.browser.page_in_bounds import BrowserPageInBoundsView class BrowserPaginateView(BrowserPageInBoundsView): """Paginate Groups and Books.""" def _paginate_section(self, qs: QuerySet, page: int) -> QuerySet: """Paginate a group or Comic section.""" orphans = 0 if self.model_group == "f" or self.params.get("search") else 5 paginator = Paginator(qs, BROWSER_MAX_OBJ_PER_PAGE, orphans=orphans) try: paginator_page = paginator.page(page) qs = paginator_page.object_list except EmptyPage: if self.model_group != "f": model_name = qs.model.__name__ if qs.model else "UnknownGroup" logger.warning(f"No {model_name}s on page {page}") qs = qs.model.objects.none() return qs def _paginate_groups(self, group_qs: QuerySet): """Paginate the group object list before books.""" page = self.kwargs.get("page", 1) return self._paginate_section(group_qs, page) def _paginate_books(self, book_qs, total_group_count, page_group_count) -> QuerySet: """Paginate the book object list based on how many group/folders are showing.""" group_remainder = total_group_count % BROWSER_MAX_OBJ_PER_PAGE num_books_on_mixed_page = BROWSER_MAX_OBJ_PER_PAGE - group_remainder if page_group_count: # There are books after the groups on the same page # Add remainder books without the paginator page_book_qs = book_qs[:num_books_on_mixed_page] else: # There are books after the groups on a new page book_offset = 0 if not group_remainder else num_books_on_mixed_page page_book_qs = book_qs[book_offset:] # Which book page are we on after groups? 
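            # Worked example (hypothetical numbers): if BROWSER_MAX_OBJ_PER_PAGE
            # were 100 and total_group_count were 250, groups fill pages 1-3,
            # so browser page 4 becomes book-only page 4 - ceil(250/100) = 1.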
page = self.kwargs.get("page", 1) num_group_and_mixed_pages = ceil( total_group_count / BROWSER_MAX_OBJ_PER_PAGE ) book_only_page = page - num_group_and_mixed_pages page_book_qs = self._paginate_section(page_book_qs, book_only_page) return page_book_qs def paginate( self, group_qs: QuerySet, book_qs: QuerySet, group_count: int ) -> tuple[QuerySet, QuerySet, int, int]: """Paginate the queryset into a group and book object lists.""" if self.TARGET == "opds2": self._opds_number_of_books = book_qs.count() self._opds_number_of_groups = group_count page_group_qs = self._paginate_groups(group_qs) page_group_count = page_group_qs.count() page_book_qs = self._paginate_books(book_qs, group_count, page_group_count) page_book_count = page_book_qs.count() return page_group_qs, page_book_qs, page_group_count, page_book_count ================================================ FILE: codex/views/browser/params.py ================================================ """Parse browser params.""" from collections.abc import Mapping, MutableMapping from types import MappingProxyType from typing import Any from loguru import logger from codex.serializers.browser.settings import ( BrowserSettingsSerializer, BrowserSettingsSerializerBase, ) from codex.views.browser.settings import BrowserSettingsBaseView class BrowserParamsView(BrowserSettingsBaseView): """Browser Params Parsing.""" input_serializer_class: type[BrowserSettingsSerializerBase] = ( BrowserSettingsSerializer ) def __init__(self, *args, **kwargs) -> None: """Initialize properties.""" super().__init__(*args, **kwargs) self._params: MappingProxyType[str, Any] | None = None def init_params(self) -> MutableMapping[str, Any]: """Get params from stored settings and request.""" serializer = self.input_serializer_class(data=self.request.GET) serializer.is_valid(raise_exception=True) params = self.load_params_from_settings() if serializer.validated_data: params.update(serializer.validated_data) return params def _update_last_route(self, data: MutableMapping) -> None: """Save last route to data.""" last_route = data.get("last_route", {}) last_route.update( { "group": self.kwargs.get("group", "r"), "pks": self.kwargs.get("pks", (0,)), "page": self.kwargs.get("page", 1), } ) data["last_route"] = last_route def set_params(self, params: Mapping) -> None: """Manually set the params.""" self._params = MappingProxyType(params) @property def params(self) -> MappingProxyType: """Validate submitted settings and apply them over the session settings.""" if self._params is None: try: params = self.init_params() self._update_last_route(params) self.save_params_to_settings(params) self.set_order_by_default(params) self.set_params(params) except Exception as exc: # for debugging if this goes awry logger.exception(exc) raise return self._params # pyright: ignore[reportReturnType], # ty: ignore[invalid-return-type] ================================================ FILE: codex/views/browser/saved_settings.py ================================================ """Saved browser settings views.""" from types import MappingProxyType from loguru import logger from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.models.base import NamedModel from codex.models.identifier import IdentifierSource from codex.models.named import ( AgeRating, Character, Country, CreditPerson, Genre, Language, Location, OriginalFormat, SeriesGroup, Story, StoryArc, Tag, Tagger, Team, Universe, ) from codex.models.settings import ( ClientChoices, SettingsBrowser, 
SettingsBrowserFilters, SettingsBrowserLastRoute, ) from codex.serializers.browser.saved import ( SavedBrowserSettingsListSerializer, SavedBrowserSettingsSaveSerializer, SavedSettingsLoadSerializer, ) from codex.views.auth import AuthFilterGenericAPIView from codex.views.settings import ( BROWSER_CREATE_ARGS, BROWSER_FILTER_ARGS, SETTINGS_BROWSER_SELECT_RELATED, SettingsBaseView, ) # Map filter field names to the model whose PKs they store. _FILTER_FK_MODEL_MAP = MappingProxyType( { "age_rating": AgeRating, "characters": Character, "country": Country, "credits": CreditPerson, "genres": Genre, "identifier_source": IdentifierSource, "language": Language, "locations": Location, "original_format": OriginalFormat, "series_groups": SeriesGroup, "stories": Story, "story_arcs": StoryArc, "tagger": Tagger, "tags": Tag, "teams": Team, "universes": Universe, } ) def _validate_filter_field( filters_data: dict, field: str, model: type[NamedModel], warnings: list[str] ): """Validate one filter field by existing models.""" pk_list = filters_data.get(field) if not pk_list or not isinstance(pk_list, list): return # Skip null sentinel values real_pks = frozenset({pk for pk in pk_list if pk is not None}) if not real_pks: return existing_pks = frozenset( model.objects.filter(pk__in=real_pks).values_list("pk", flat=True) ) if bad_pks := frozenset(real_pks - existing_pks): logger.info( f"Saved settings filter {field!r}: removing invalid PK(s): {sorted(bad_pks)}" ) # Keep None sentinels plus valid PKs cleaned = frozenset(pk_list) - bad_pks filters_data[field] = cleaned warnings.append(field) def _validate_filter_pks( filters_data: dict, filters_obj: SettingsBrowserFilters | None = None, ) -> list[str]: """ Validate FK-based filter PKs and strip invalid ones. Modifies *filters_data* in place. When *filters_obj* is provided the cleaned values are persisted back to the database so subsequent loads skip the validation. Returns a list of filter field names that had invalid PKs removed. """ warnings: list[str] = [] for field, model in _FILTER_FK_MODEL_MAP.items(): _validate_filter_field(filters_data, field, model, warnings) # Persist the cleaned filters back to the row. if warnings and filters_obj is not None: for field in warnings: setattr(filters_obj, field, filters_data[field]) filters_obj.save() return warnings class _SavedSettingsOwnerMixin: """Shared user/session resolution for saved-settings views.""" def _get_user_and_session(self): user = self.request.user # pyright: ignore [reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if user and getattr(user, "pk", None): return user, None if not self.request.session.session_key: # pyright: ignore [reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] self.request.session.save() # pyright: ignore [reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] return None, self.request.session.session_key # pyright: ignore [reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] def _owner_kwargs(self): user, session_key = self._get_user_and_session() if user: return {"user": user} return {"session_id": session_key} class SavedBrowserSettingsListView(_SavedSettingsOwnerMixin, AuthFilterGenericAPIView): """GET: list saved settings. 
POST: save current settings with a name.""" serializer_class: type[BaseSerializer] | None = SavedBrowserSettingsListSerializer def get(self, *args, **kwargs) -> Response: """Return list of saved setting names.""" owner = self._owner_kwargs() qs = ( SettingsBrowser.objects.filter(client=ClientChoices.API, **owner) .exclude(name="") .order_by("name") .values("pk", "name") ) serializer = self.get_serializer({"savedSettings": list(qs)}) return Response(serializer.data) @staticmethod def _copy_settings(source: SettingsBrowser, target: SettingsBrowser): """Copy all settings columns from source to target.""" for key in SettingsBrowser.DIRECT_KEYS: setattr(target, key, getattr(source, key)) target.search = source.search target.show = source.show target.save() # Copy filters src_filters = source.filters # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] tgt_filters = target.filters # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] for key in SettingsBrowserFilters.FILTER_KEYS: setattr(tgt_filters, key, getattr(src_filters, key)) tgt_filters.save() # Copy last_route src_route = source.last_route # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] tgt_route = target.last_route # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] tgt_route.group = src_route.group tgt_route.pks = src_route.pks tgt_route.page = src_route.page tgt_route.save() def post(self, *args, **kwargs) -> Response: """Save current settings with a name. Overwrites if name exists.""" serializer = SavedBrowserSettingsSaveSerializer(data=self.request.data) serializer.is_valid(raise_exception=True) name = serializer.validated_data["name"] user, session_key = self._get_user_and_session() owner = {"user": user} if user else {"session_id": session_key} # Find the current (unnamed) settings row. current = ( SettingsBrowser.objects.filter(client=ClientChoices.API, name="", **owner) .select_related(*SETTINGS_BROWSER_SELECT_RELATED) .first() ) if not current: return Response({"detail": "No current settings found."}, status=404) # Check for existing saved setting with the same name. existing = ( SettingsBrowser.objects.filter(client=ClientChoices.API, name=name, **owner) .select_related(*SETTINGS_BROWSER_SELECT_RELATED) .first() ) if existing: self._copy_settings(current, existing) created = False else: # Create new named setting by cloning the current row. 
new_sb = SettingsBrowser.objects.create( user=user, session_id=session_key, client=ClientChoices.API, name=name, top_group=current.top_group, order_by=current.order_by, order_reverse=current.order_reverse, search=current.search, custom_covers=current.custom_covers, dynamic_covers=current.dynamic_covers, twenty_four_hour_time=current.twenty_four_hour_time, always_show_filename=current.always_show_filename, show=current.show, ) # Clone filters src_filters = current.filters # pyright: ignore[reportAttributeAccessIssue] new_filters = SettingsBrowserFilters(browser=new_sb) for key in SettingsBrowserFilters.FILTER_KEYS: val = getattr(src_filters, key) if isinstance(val, list): val = list(val) setattr(new_filters, key, val) new_filters.save() # Clone last_route src_route = current.last_route # pyright: ignore[reportAttributeAccessIssue] SettingsBrowserLastRoute.objects.create( browser=new_sb, group=src_route.group, pks=list(src_route.pks) if src_route.pks else [0], page=src_route.page, ) created = True return Response( {"name": name, "created": created}, status=201 if created else 200, ) class SavedBrowserSettingsLoadView(SettingsBaseView): """GET: load a saved setting by pk. DELETE: delete a saved setting.""" MODEL = SettingsBrowser CLIENT = ClientChoices.API FILTER_ARGS = BROWSER_FILTER_ARGS CREATE_ARGS = BROWSER_CREATE_ARGS serializer_class: type[BaseSerializer] | None = SavedSettingsLoadSerializer def _owner_kwargs(self): user = self._get_request_user() session_key = self._ensure_session_key() if user: return {"user": user} return {"session_id": session_key} def get(self, *args, **kwargs) -> Response: """Load saved settings by pk and validate filter PKs.""" pk = self.kwargs["pk"] owner = self._owner_kwargs() saved = ( SettingsBrowser.objects.filter(pk=pk, client=ClientChoices.API, **owner) .exclude(name="") .select_related(*SETTINGS_BROWSER_SELECT_RELATED) .first() ) if not saved: return Response({"detail": "Saved setting not found."}, status=404) # Build the settings dict. data = self.browser_instance_to_dict(saved) # Validate filter PKs, persist cleaned data, and collect warnings. 
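        # e.g. a saved "characters" filter holding the pk of a since-deleted
        # Character is stripped here and "characters" is returned in
        # filterWarnings (illustrative field).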
filters_obj = saved.filters # pyright: ignore[reportAttributeAccessIssue] filter_warnings = _validate_filter_pks(data.get("filters", {}), filters_obj) result = { "settings": data, "filterWarnings": filter_warnings, } serializer = self.get_serializer(result) return Response(serializer.data) def delete(self, *args, **kwargs) -> Response: """Delete saved settings by pk.""" pk = self.kwargs["pk"] owner = self._owner_kwargs() deleted_count, _ = ( SettingsBrowser.objects.filter(pk=pk, client=ClientChoices.API, **owner) .exclude(name="") .delete() ) if not deleted_count: return Response({"detail": "Saved setting not found."}, status=404) return Response(status=204) ================================================ FILE: codex/views/browser/settings.py ================================================ """Browser settings views.""" from collections.abc import MutableMapping from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.models.settings import ( ClientChoices, SettingsBrowser, ) from codex.serializers.browser.settings import ( BrowserSettingsInputSerializer, BrowserSettingsSerializer, ) from codex.serializers.settings import SettingsInputSerializer from codex.views.const import FOLDER_GROUP, GROUP_ORDER, STORY_ARC_GROUP from codex.views.settings import ( BROWSER_CREATE_ARGS, BROWSER_FILTER_ARGS, SettingsBaseView, ) class BrowserSettingsBaseView(SettingsBaseView): """Browser settings — model config, order-by default, and reset.""" MODEL = SettingsBrowser CLIENT = ClientChoices.API FILTER_ARGS = BROWSER_FILTER_ARGS CREATE_ARGS = BROWSER_CREATE_ARGS def set_order_by_default(self, params: MutableMapping) -> None: """Set dynamic default for null order_by by group.""" if params["order_by"]: return group = self.kwargs.get("group") order_by = ( "filename" if group == FOLDER_GROUP else "story_arc_number" if group == STORY_ARC_GROUP else "sort_name" ) params["order_by"] = order_by def reset_browser_settings(self) -> dict: """Reset browser settings to model defaults and return the params dict.""" instance: SettingsBrowser = self._get_or_create_settings( # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] self.MODEL, self.CLIENT, self.FILTER_ARGS, self.CREATE_ARGS, ) defaults = self.get_browser_default_params() # Reset direct fields for key in SettingsBrowser.DIRECT_KEYS: setattr(instance, key, defaults[key]) # Reset show FK to default show row self._save_browser_show(instance, defaults["show"]) instance.save() # Reset filters in-place self._save_browser_filters( instance.filters, # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] defaults["filters"], ) # Reset last_route in-place self._save_browser_last_route( instance.last_route, # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] defaults["last_route"], ) return self.browser_instance_to_dict(instance) class BrowserSettingsView(BrowserSettingsBaseView): """Browser settings GET/PATCH/DELETE endpoint.""" input_serializer_class: type[SettingsInputSerializer] = ( BrowserSettingsInputSerializer ) serializer_class: type[BaseSerializer] | None = BrowserSettingsSerializer # ── Validation ────────────────────────────────────────────────── @staticmethod def _validate_browse_top_group(params, group: str, top_group: str) -> None: """Validate top group for browse groups.""" show = params["show"] if group == "r" or ( group in GROUP_ORDER and show.get(top_group) and 
GROUP_ORDER.index(top_group) < GROUP_ORDER.index(group) ): return for show_group, on in show.items(): if on: params["top_group"] = show_group break else: params["top_group"] = "c" @classmethod def _validate_top_group(cls, params, group: str, top_group: str) -> None: """Validate top group.""" if group == top_group: return if group in "fa": params["top_group"] = group else: cls._validate_browse_top_group(params, group, top_group) def _validate_settings_get(self, validated_data, params: dict) -> dict: """Validate and fix settings on GET.""" # This is a micro version of browser/validate.py # It would be ideal to combine them but browser validate does redirects so maybe later. top_group = params["top_group"] group = validated_data.get("group", "r") if validated_data else "r" self._validate_top_group(params, group, top_group) self.set_order_by_default(params) return params # ── HTTP methods ──────────────────────────────────────────────── @extend_schema(parameters=[BrowserSettingsInputSerializer]) def get(self, *args, **kwargs) -> Response: """Get session settings.""" serializer = self.input_serializer_class(data=self.request.GET) serializer.is_valid(raise_exception=True) validated_data = serializer.validated_data only = validated_data.get("only") if validated_data else None params = self.load_params_from_settings(only=only) params = self._validate_settings_get(validated_data, params) serializer = self.get_serializer(params) return Response(serializer.data) @extend_schema(responses=None) def patch(self, *args, **kwargs) -> Response: """Update session settings.""" data = self.request.data serializer = self.get_serializer(data=data) serializer.is_valid(raise_exception=True) updates = serializer.validated_data params = self.load_params_from_settings() params.update(updates) self.save_params_to_settings(params) serializer = self.get_serializer(params) return Response(serializer.data) @extend_schema(responses=BrowserSettingsSerializer) def delete(self, *args, **kwargs) -> Response: """Reset browser settings to model defaults.""" params = self.reset_browser_settings() self.set_order_by_default(params) serializer = self.get_serializer(params) return Response(serializer.data) ================================================ FILE: codex/views/browser/title.py ================================================ """Browser title.""" from collections.abc import Mapping from codex.models import Comic, Volume from codex.views.browser.breadcrumbs import BrowserBreadcrumbsView class BrowserTitleView(BrowserBreadcrumbsView): """Browser title methods.""" def _get_root_group_name(self) -> tuple: if not self.model: reason = "No model set in browser" raise ValueError(reason) plural = self.model._meta.verbose_name_plural if not plural: reason = f"No plural name for {self.model}" raise ValueError(reason) return plural.capitalize(), 0 def _get_group_name(self) -> tuple: group_number_to = None group_count = 0 group_name = "" if gi := self.group_instance: group_name = gi.name if isinstance(gi, Volume): group_number_to = gi.number_to group_count = gi.series.volume_count elif isinstance(gi, Comic): group_number_to = gi.volume.number_to group_count = gi.volume.issue_count return group_name, group_number_to, group_count def get_browser_page_title(self) -> Mapping: """Get the proper title for this browse level.""" pks = self.kwargs.get("pks") if not pks: group_name, group_count = self._get_root_group_name() group_number_to = None else: group_name, group_number_to, group_count = self._get_group_name() return { "group_name": 
group_name, "group_number_to": group_number_to, "group_count": group_count, } ================================================ FILE: codex/views/browser/validate.py ================================================ """Browser Settings and URL Validation.""" from collections.abc import Mapping from copy import deepcopy from types import MappingProxyType from typing import Any from loguru import logger from rest_framework.exceptions import NotFound from codex.choices.browser import DEFAULT_BROWSER_ROUTE from codex.models.groups import BrowserGroupModel from codex.util import mapping_to_dict from codex.views.browser.filters.search.parse import SearchFilterView from codex.views.const import ( COMIC_GROUP, FOLDER_GROUP, GROUP_MODEL_MAP, ROOT_GROUP, STORY_ARC_GROUP, ) from codex.views.exceptions import SeeOtherRedirectError class BrowserValidateView(SearchFilterView): """Browser Settings and URL Validation.""" DEFAULT_ROUTE = MappingProxyType( {"name": "browser", "params": DEFAULT_BROWSER_ROUTE} ) def __init__(self, *args, **kwargs) -> None: """Initialize properties.""" super().__init__(*args, **kwargs) self._is_admin: bool | None = None self._model_group: str = "" self._model: type[BrowserGroupModel] | None = None self._rel_prefix: str | None = None self._valid_nav_groups: tuple[str, ...] | None = None @property def model_group(self) -> str: """Memoize the model group.""" if not self._model_group: group = self.kwargs["group"] if group == ROOT_GROUP: group = self.params["top_group"] self._model_group = group return self._model_group @property def model(self) -> type[BrowserGroupModel] | None: """Memoize the model for the browse list.""" if not self._model: model = GROUP_MODEL_MAP.get(self.model_group) if model is None: group = self.kwargs["group"] detail = f"Cannot browse {group=}" logger.debug(detail) raise NotFound(detail=detail) self._model = model return self._model @property def rel_prefix(self) -> str: """Memoize model rel prefix.""" if self._rel_prefix is None: self._rel_prefix = self.get_rel_prefix(self.model) return self._rel_prefix def raise_redirect( self, reason, route_mask=None, settings_mask: Mapping | None = None ) -> None: """Redirect the client to a valid group url.""" route: dict[str, Any] = mapping_to_dict(self.DEFAULT_ROUTE) # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] if route_mask: route["params"].update(route_mask) settings: dict[str, Any] = deepcopy(mapping_to_dict(self.params)) # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] if settings_mask: settings.update(settings_mask) detail = {"route": route, "settings": settings, "reason": reason} raise SeeOtherRedirectError(detail=detail) def _get_valid_browse_top_groups(self) -> list: """ Get valid top groups for the current settings. Valid top groups are determined by the Browser Settings. 
""" valid_top_groups = [] show = self.params["show"] for nav_group, allowed in show.items(): if allowed: valid_top_groups.append(nav_group) # Issues is always a valid top group valid_top_groups += [COMIC_GROUP] return valid_top_groups def _validate_top_group(self, valid_top_groups) -> None: nav_group = self.kwargs.get("group") top_group = self.params.get("top_group") if top_group not in valid_top_groups: reason = f"top_group {top_group} not in valid nav groups {valid_top_groups}, changed to " if nav_group in valid_top_groups: valid_top_group = nav_group reason += "nav group: " else: valid_top_group = valid_top_groups[0] reason += "first valid top group " reason += valid_top_group pks = self.kwargs.get("pks", ()) page = self.kwargs["page"] route = {"group": nav_group, "pks": pks, "page": page} settings_mask = {"top_group": valid_top_group} self.raise_redirect(reason, route, settings_mask) def _get_valid_browse_nav_groups(self, valid_top_groups) -> tuple: """ Get valid nav groups for the current settings. Valid nav groups are the top group and below that are also enabled in browser settings. May always navigate to root 'r' nav group. """ top_group = self.params["top_group"] nav_group = self.kwargs["group"] valid_nav_groups = [ROOT_GROUP] for possible_index, possible_nav_group in enumerate(valid_top_groups): if top_group == possible_nav_group: # all the nav groups past this point, # 'c' is obscured by the web reader url, but valid for opds tail_top_groups = valid_top_groups[possible_index:] valid_nav_groups += tail_top_groups break if nav_group not in valid_nav_groups: reason = f"Nav group {nav_group} unavailable, redirect to {ROOT_GROUP}" self.raise_redirect(reason) return tuple(valid_nav_groups) def _validate_folder_settings(self) -> tuple: """Check that all the view variables for folder mode are set right.""" # Check folder view admin flag if not self.admin_flags["folder_view"]: reason = "folder view disabled" valid_top_groups = self._get_valid_browse_top_groups() settings_mask = {"top_group": valid_top_groups[0]} self.raise_redirect(reason, settings_mask=settings_mask) valid_top_groups = (FOLDER_GROUP,) self._validate_top_group(valid_top_groups) return valid_top_groups def _validate_browser_group_settings(self) -> tuple: """Check that all the view variables for browser mode are set right.""" # Validate Browser top_group # Change top_group if its not in the valid top groups valid_top_groups = self._get_valid_browse_top_groups() self._validate_top_group(valid_top_groups) # Validate pks nav_group = self.kwargs["group"] pks = self.kwargs["pks"] if nav_group == ROOT_GROUP and (pks and 0 not in pks): # r never has pks reason = f"Redirect r with {pks=} to pks 0" self.raise_redirect(reason) # Validate Browser nav_group # Redirect if nav group is wrong return self._get_valid_browse_nav_groups(valid_top_groups) def _validate_story_arc_settings(self) -> tuple[str, ...]: """Validate story arc settings.""" valid_top_groups = (STORY_ARC_GROUP,) self._validate_top_group(valid_top_groups) return valid_top_groups @property def valid_nav_groups(self) -> tuple[str, ...]: """Memoize valid nav groups.""" if self._valid_nav_groups is None: group = self.kwargs["group"] validate_group = self.params["top_group"] if group == COMIC_GROUP else group if validate_group == FOLDER_GROUP: vng = self._validate_folder_settings() elif validate_group == STORY_ARC_GROUP: vng = self._validate_story_arc_settings() else: vng = self._validate_browser_group_settings() self._valid_nav_groups = vng return self._valid_nav_groups 
================================================ FILE: codex/views/const.py ================================================ """Common view constants.""" from datetime import UTC, datetime from types import MappingProxyType from django.contrib.auth.models import Group, User from django.contrib.sessions.models import Session from django.db.models.expressions import Value from django.db.models.fields import DateTimeField, PositiveSmallIntegerField from codex.models import ( AgeRating, BrowserGroupModel, Character, Comic, Country, Credit, CreditPerson, CreditRole, Folder, Genre, Identifier, IdentifierSource, Imprint, Language, Library, Location, OriginalFormat, Publisher, ScanInfo, Series, SeriesGroup, StoryArc, StoryArcNumber, Tag, Tagger, Team, Universe, Volume, ) from codex.settings import CODEX_PATH ROOT_GROUP = "r" FOLDER_GROUP = "f" STORY_ARC_GROUP = "a" COMIC_GROUP = "c" GROUP_NAME_MAP = MappingProxyType( {"p": "publisher", "i": "imprint", "s": "series", "v": "volume"} ) STATIC_IMG_PATH = CODEX_PATH / "static/img" MISSING_COVER_NAME_MAP = MappingProxyType( { **GROUP_NAME_MAP, FOLDER_GROUP: "folder", STORY_ARC_GROUP: "story-arc", } ) MISSING_COVER_FN = "missing-cover-165.webp" MISSING_COVER_PATH = STATIC_IMG_PATH / MISSING_COVER_FN GROUP_RELATION = MappingProxyType( { **GROUP_NAME_MAP, COMIC_GROUP: "pk", FOLDER_GROUP: "parent_folder", STORY_ARC_GROUP: "story_arc_numbers__story_arc", } ) FILTER_ONLY_GROUP_RELATION = MappingProxyType( { **GROUP_RELATION, FOLDER_GROUP: "folders", } ) METADATA_GROUP_RELATION = MappingProxyType( { **GROUP_NAME_MAP, COMIC_GROUP: "comic", FOLDER_GROUP: "comic__folders", STORY_ARC_GROUP: "comic__story_arc_numbers__story_arc", } ) CUSTOM_COVER_GROUP_RELATION = MappingProxyType( {**GROUP_NAME_MAP, FOLDER_GROUP: "folder", STORY_ARC_GROUP: "storyarc"} ) GROUP_ORDER = "rpisv" MODEL_REL_MAP = MappingProxyType( { Publisher: "publisher", Imprint: "imprint", Series: "series", Volume: "volume", Folder: "parent_folder", StoryArc: "story_arc_numbers__story_arc", Comic: "pk", } ) GROUP_MODEL_MAP: MappingProxyType[str, type[BrowserGroupModel] | None] = ( MappingProxyType( { ROOT_GROUP: None, "p": Publisher, "i": Imprint, "s": Series, "v": Volume, COMIC_GROUP: Comic, FOLDER_GROUP: Folder, STORY_ARC_GROUP: StoryArc, } ) ) GROUP_MODELS = ( Publisher, Imprint, Series, Volume, Folder, StoryArc, ) STATS_GROUP_MODELS = ( *GROUP_MODELS, Comic, ) METADATA_MODELS = ( AgeRating, Character, Country, Credit, CreditPerson, CreditRole, Genre, Identifier, IdentifierSource, Language, Location, OriginalFormat, SeriesGroup, ScanInfo, StoryArc, StoryArcNumber, Team, Tag, Tagger, Universe, ) CONFIG_MODELS = ( Library, User, Group, Session, ) GROUP_MTIME_MODEL_MAP = MappingProxyType({"r": Publisher, "a": StoryArc, "f": Folder}) EPOCH_START = datetime.fromtimestamp(0, tz=UTC) ONE_INTEGERFIELD = Value(1, PositiveSmallIntegerField()) NONE_INTEGERFIELD = Value(None, PositiveSmallIntegerField()) NONE_DATETIMEFIELD = Value(None, DateTimeField()) EPOCH_START_DATETIMEFIELD = Value(EPOCH_START) ================================================ FILE: codex/views/download.py ================================================ """Download a comic archive.""" from pathlib import Path from django.http import FileResponse, Http404 from drf_spectacular.types import OpenApiTypes from drf_spectacular.utils import extend_schema from codex.models.comic import Comic from codex.views.auth import AuthFilterAPIView class DownloadView(AuthFilterAPIView): """Return the comic archive file as an attachment.""" content_type = 
"application/vnd.comicbook+zip" AS_ATTACHMENT: bool = True @extend_schema(responses={(200, content_type): OpenApiTypes.BINARY}) def get(self, *_args, **kwargs) -> FileResponse: """Download a comic archive.""" pk = kwargs.get("pk") try: group_acl_filter = self.get_group_acl_filter(Comic, self.request.user) comic = ( Comic.objects.filter(group_acl_filter) .distinct() .only("path", "file_type") .get(pk=pk) ) except Comic.DoesNotExist as err: reason = f"Comic {pk} not not found." raise Http404(reason) from err # FileResponse requires file handle not be closed in this method. comic_file = Path(comic.path).open("rb") # noqa: SIM115 content_type = "application/" if comic.file_type == "PDF": content_type += "pdf" elif comic.file_type: content_type += "vnd.comicbook+" + comic.file_type.lower() else: content_type += "octet-stream" filename = comic.get_filename() return FileResponse( comic_file, as_attachment=self.AS_ATTACHMENT, content_type=content_type, filename=filename, ) class FileView(DownloadView): """View a single comic in the browser.""" AS_ATTACHMENT: bool = False ================================================ FILE: codex/views/error.py ================================================ """Custom Http Error Views.""" from django.http.response import HttpResponseRedirect, JsonResponse from rest_framework.response import Response from rest_framework.views import exception_handler from codex.views.opds.error import OPDS_PATH_PREFIX, codex_opds_exception_handler def codex_exception_handler( exc, context ) -> JsonResponse | None | HttpResponseRedirect | Response: """Assume OPDS clients want redirects instead of errors.""" response = None request = context.get("request") if OPDS_PATH_PREFIX in request.path: response = codex_opds_exception_handler(exc, context) if not response: response = exception_handler(exc, context) return response ================================================ FILE: codex/views/exceptions.py ================================================ """Special Redirect Error.""" from collections.abc import Iterable, Mapping, MutableMapping from copy import deepcopy from pprint import pformat from caseconverter import camelcase from django.core.validators import EMPTY_VALUES from django.http.response import HttpResponseRedirect from django.shortcuts import redirect from django.urls import reverse from loguru import logger from rest_framework.exceptions import APIException from rest_framework.status import HTTP_303_SEE_OTHER from codex.choices.browser import DEFAULT_BROWSER_ROUTE from codex.serializers.route import RouteSerializer from codex.views.util import pop_name _OPDS_REDIRECT_SETTINGS_SNAKE_CASE_KEYS = { "filter", "top_group", "order_by", "order_reverse", } _OPDS_REDIRECT_SETTINGS_KEYS = tuple( sorted( _OPDS_REDIRECT_SETTINGS_SNAKE_CASE_KEYS | {camelcase(key) for key in _OPDS_REDIRECT_SETTINGS_SNAKE_CASE_KEYS} ) ) _REDIRECT_SETTINGS_KEYS = _OPDS_REDIRECT_SETTINGS_KEYS class SeeOtherRedirectError(APIException): """Redirect for 303.""" status_code = HTTP_303_SEE_OTHER default_code = "redirect" default_detail = "redirect to a valid route" @staticmethod def _copy_params_into( params: Mapping, keys: Iterable[str], final_params: MutableMapping ) -> None: """Copy and filter params into another map as camelcase.""" for key in keys: value = params.get(key) if value in EMPTY_VALUES: continue final_params[camelcase(key)] = value def __init__(self, detail) -> None: """Create a response to pass to the exception handler.""" super().__init__(detail) # Copy to edit and not write over refs 
        detail = dict(detail)
        # Get route params
        route = detail.get("route", {})
        params = route.get("params", DEFAULT_BROWSER_ROUTE)
        params = pop_name(params)
        route = deepcopy(route)
        route["params"] = params
        # For OPDS
        self.route_kwargs = params
        serializer = RouteSerializer(params)
        route["params"] = serializer.data
        detail["route"] = route
        settings = detail.get("settings", {})
        filtered_settings = {}
        self._copy_params_into(settings, _REDIRECT_SETTINGS_KEYS, filtered_settings)
        detail["settings"] = filtered_settings
        self.detail = detail
        logger.debug(f"redirect {pformat(self.detail)}")

    def _get_query_params(self) -> dict:
        """Change OPDS settings like the frontend does with error.detail."""
        settings = (
            self.detail.get("settings", {}) if isinstance(self.detail, Mapping) else {}  # ty: ignore[no-matching-overload]
        )
        query_params = {}
        self._copy_params_into(settings, _OPDS_REDIRECT_SETTINGS_KEYS, query_params)
        return query_params

    def get_response(self, url_name) -> HttpResponseRedirect:
        """Return a Django Redirect Response."""
        # only used in codex_exception_handler for opds stuff
        query = self._get_query_params()
        url = reverse(url_name, kwargs=dict(self.route_kwargs), query=query)
        return redirect(url, permanent=False)


class NoContent(APIException):
    """Provide a 204 response."""


================================================
FILE: codex/views/frontend.py
================================================
"""Frontend views."""

from collections.abc import Sequence

from rest_framework.permissions import AllowAny, BasePermission
from rest_framework.renderers import BaseRenderer, TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.utils.serializer_helpers import ReturnDict

from codex.serializers.route import RouteSerializer
from codex.views.browser.settings import BrowserSettingsBaseView
from codex.views.mixins import UserActiveMixin


class IndexView(BrowserSettingsBaseView, UserActiveMixin):
    """The main app."""

    permission_classes: Sequence[type[BasePermission]] = (AllowAny,)
    renderer_classes: Sequence[type[BaseRenderer]] = [TemplateHTMLRenderer]
    template_name = "index.html"

    def _get_last_route(self) -> ReturnDict:
        """Get the last route from the session."""
        last_route = self.get_last_route()
        serializer = RouteSerializer(last_route)
        return serializer.data

    def get(self, *_args, **_kwargs) -> Response:
        """Get the app index page."""
        extra_context = {
            "last_route": self._get_last_route(),
        }
        self.mark_user_active()
        return Response(extra_context)


================================================
FILE: codex/views/healthcheck.py
================================================
"""Docker Healthcheck."""

from django.http import HttpResponse


def health_check_view(request) -> HttpResponse:  # noqa: ARG001
    """Return OK."""
    return HttpResponse("Ok")


================================================
FILE: codex/views/lazy_import.py
================================================
"""Lazy Import View."""

from rest_framework.response import Response

from codex.librarian.mp_queue import LIBRARIAN_QUEUE
from codex.librarian.scribe.tasks import LazyImportComicsTask
from codex.serializers.mixins import OKSerializer
from codex.views.auth import AuthGenericAPIView


class LazyImportView(AuthGenericAPIView):
    """Queue a lazy metadata import for folder or comic groups."""

    serializer_class = OKSerializer

    def get(self, *args, **kwargs) -> Response:
        """Queue the lazy import task and return OK."""
        group = self.kwargs.get("group", "")
        if group in "fc":
            pks = self.kwargs.get("pks", ())
            pks = frozenset(pks)
            LIBRARIAN_QUEUE.put(LazyImportComicsTask(group=group, pks=pks))
        serializer = 
self.get_serializer() return Response(serializer.data) ================================================ FILE: codex/views/mixins.py ================================================ """Cross view annotation methods.""" from typing import TYPE_CHECKING from django.db.models import CharField from django.db.models.expressions import Case, F, Value, When from django.db.models.functions import Concat from codex.librarian.bookmark.tasks import UserActiveTask from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.models.comic import Comic from codex.models.groups import Imprint, Volume from codex.views.const import GROUP_NAME_MAP if TYPE_CHECKING: from rest_framework.request import Request _SHOW_GROUPS = tuple(GROUP_NAME_MAP.keys()) _GROUP_NAME_TARGETS = frozenset({"browser", "opds1", "opds2", "reader"}) _VARIABLE_SHOW = "pi" class SharedAnnotationsMixin: # (BrowserFilterView): """Cross view annotation methods.""" @staticmethod def _get_order_group( nav_group, show, parent_group, index, pks, order_groups ) -> tuple: do_break = False if ( nav_group not in _VARIABLE_SHOW or show.get(nav_group) ) and nav_group == parent_group: watermark = index if pks and len(pks) == 1: watermark += 1 order_groups = _SHOW_GROUPS[watermark:] do_break = True return order_groups, do_break @classmethod def _get_order_groups(cls, parent_group, pks, show) -> tuple: """Annotate sort_name.""" order_groups = () if parent_group != "c": for index, nav_group in enumerate(_SHOW_GROUPS): order_groups, do_break = cls._get_order_group( nav_group, show, parent_group, index, pks, order_groups ) if do_break: break else: order_groups = _SHOW_GROUPS return order_groups @classmethod def get_sort_name_annotations(cls, model, parent_group, pks, show) -> dict: """Annotate sort names for browser subclasses and reader.""" sort_name_annotations = {} if model is Comic: order_groups = cls._get_order_groups(parent_group, pks, show) for order_group in order_groups: group_name = GROUP_NAME_MAP[order_group] ann_name = f"{group_name}_sort_name" name_field = "name" if group_name == "volume" else "sort_name" sort_name = F(f"{group_name}__{name_field}") sort_name_annotations[ann_name] = sort_name elif model is Volume: sort_name_annotations["sort_name"] = F("name") return sort_name_annotations @staticmethod def _volume_name_annotation(model) -> Case: prefix = "volume__" if model is Comic else "" name_rel = prefix + "name" number_to_rel = prefix + "number_to" return Case( When(**{f"{number_to_rel}__isnull": True}, then=F(name_rel)), default=Concat( F(name_rel), Value("-"), F(number_to_rel), ), output_field=CharField(), ) @classmethod def annotate_group_names(cls, qs): """Annotate name fields by hoisting them up.""" # Optimized to only lookup what is used on the frontend target = cls.TARGET # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if target not in _GROUP_NAME_TARGETS: return qs group_names = {} if qs.model in (Comic, Volume): group_names["series_name"] = F("series__name") if qs.model is Comic: if target != "reader": group_names["publisher_name"] = F("publisher__name") if target == "opds2": group_names["imprint_name"] = F("imprint__name") group_names.update( { "volume_name": F("volume__name"), "volume_number_to": F("volume__number_to"), } ) elif qs.model is Imprint: group_names["publisher_name"] = F("publisher__name") return qs.annotate(**group_names) class UserActiveMixin: """View that records user activity.""" def mark_user_active(self) -> None: """Get the app index page.""" if TYPE_CHECKING: self.request: 
Request  # pyright: ignore[reportUninitializedInstanceVariable]
        if self.request.user and self.request.user.pk:
            task = UserActiveTask(pk=self.request.user.pk)
            LIBRARIAN_QUEUE.put(task)


================================================
FILE: codex/views/opds/__init__.py
================================================
"""OPDS Views Common to all versions."""


================================================
FILE: codex/views/opds/auth.py
================================================
"""OPDS Authentication mixin."""

from rest_framework.authentication import (
    BasicAuthentication,
    SessionAuthentication,
)

from codex.authentication import BearerTokenAuthentication
from codex.views.auth import AuthMixin
from codex.views.opds.user_agent import get_user_agent_name


class OPDSAuthMixin(AuthMixin):
    """Add Basic Auth."""

    authentication_classes = (
        BasicAuthentication,
        BearerTokenAuthentication,
        SessionAuthentication,
    )

    @property
    def user_agent_name(self) -> str:
        """Memoize user agent name."""
        if self._user_agent_name is None:  # pyright: ignore[reportUnnecessaryComparison]
            self._user_agent_name = get_user_agent_name(self.request)  # pyright: ignore[reportAttributeAccessIssue,reportUninitializedInstanceVariable], # ty: ignore[unresolved-attribute]
        return self._user_agent_name


================================================
FILE: codex/views/opds/authentication/__init__.py
================================================
"""OPDS Authentication Views."""


================================================
FILE: codex/views/opds/authentication/v1.py
================================================
"""OPDS Authentication 1.0."""

from types import MappingProxyType

from django.contrib.staticfiles.storage import staticfiles_storage
from django.http.response import JsonResponse
from django.urls import reverse_lazy
from rest_framework import status
from rest_framework.generics import GenericAPIView

from codex.serializers.opds.authentication import OPDSAuthentication1Serializer
from codex.settings import DEBUG  # assumed home of the debug flag; re.DEBUG was a mis-import
from codex.views.opds.const import MimeType, UserAgentNames
from codex.views.opds.user_agent import get_user_agent_name

_LOGO_SIZE = 180
_DOC = MappingProxyType(
    {
        "id": reverse_lazy("opds:auth:v1"),
        "title": "Codex",
        "description": "Enter your username and password to authenticate",
        "links": [
            {
                "rel": "logo",
                "href": staticfiles_storage.url("img/logo-maskable-180.webp"),
                "type": MimeType.WEBP,
                "width": _LOGO_SIZE,
                "height": _LOGO_SIZE,
            },
            {
                "rel": "help",
                "href": "https://codex-comic-reader.readthedocs.io/",
                "type": "text/html",
            },
            {
                "rel": "register",
                "href": reverse_lazy("app:start"),
                "type": "text/html",
            },
        ],
        "authentication": [
            {
                "type": "http://opds-spec.org/auth/basic",
                "labels": {"login": "Username", "password": "Password"},
            },
        ],
    }
)


class OPDSAuthentication1View(GenericAPIView):
    """Authentication document."""

    serializer_class = OPDSAuthentication1Serializer

    @staticmethod
    def _absolute_doc(request) -> dict:
        """Absolutize the logo link url."""
        doc = dict(_DOC)
        logo_link: dict[str, str | int] = doc["links"][0]  # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment]
        href = logo_link["href"]
        href = request.build_absolute_uri(href)
        logo_link["href"] = href
        return doc

    @classmethod
    def static_get(cls, request, status_code=status.HTTP_200_OK) -> JsonResponse:
        """Serialize the authentication dict."""
        user_agent_name = get_user_agent_name(request)
        if DEBUG or user_agent_name in UserAgentNames.REQUIRE_ABSOLUTE_URL:
            doc = cls._absolute_doc(request)
        else:
            doc = _DOC
        serializer = cls.serializer_class(doc)  # 
pyright: ignore[reportOptionalCall] return JsonResponse(serializer.data, status=status_code) def get(self, *args, **kwargs) -> JsonResponse: """Get authentication response.""" return self.static_get(self.request) ================================================ FILE: codex/views/opds/binary.py ================================================ """Binary views with Basic Authentication added.""" from typing import override from rest_framework.negotiation import BaseContentNegotiation from codex.views.browser.cover import CoverView from codex.views.download import DownloadView from codex.views.opds.auth import OPDSAuthMixin from codex.views.opds.settings import OPDSBrowserSettingsMixin from codex.views.reader.page import ReaderPageView class IgnoreClientContentNegotiation(BaseContentNegotiation): """Hack for clients with wild accept headers.""" @override def select_parser(self, request, parsers): """Select the first parser in the `.parser_classes` list.""" return next(iter(parsers)) @override def select_renderer( self, request, renderers, format_suffix="", ) -> tuple: """Select the first renderer in the `.renderer_classes` list.""" renderer = next(iter(renderers)) return (renderer, renderer.media_type) class OPDSCoverView(OPDSBrowserSettingsMixin, CoverView): """Cover View with Basic Auth.""" class OPDSDownloadView(OPDSAuthMixin, DownloadView): """Download View with Basic Auth.""" class OPDSPageView(OPDSAuthMixin, ReaderPageView): """Page View with Basic Auth.""" content_negotiation_class: type[BaseContentNegotiation] = ( # pyright:ignore[reportIncompatibleVariableOverride] IgnoreClientContentNegotiation ) ================================================ FILE: codex/views/opds/const.py ================================================ """OPDS Common consts.""" from types import MappingProxyType from codex.models import ( Character, Genre, Location, SeriesGroup, StoryArc, Tag, Team, ) BLANK_TITLE = "(Empty)" AUTHOR_ROLES = {"Writer", "Author", "Script", "Plot", "Plotter", "Scripter"} OPDS_M2M_MODELS = (Character, Genre, Location, SeriesGroup, StoryArc, Tag, Team) class BookmarkFilters: """Bookmark Filters.""" UNREAD = MappingProxyType({"bookmark": "UNREAD"}) IN_PROGRESS = MappingProxyType({"bookmark": "IN_PROGRESS"}) READ = MappingProxyType({"bookmark": "READ"}) NONE = MappingProxyType({"bookmark": ""}) class Rel: """Link rel strings.""" AUTHENTICATION = "http://opds-spec.org/auth/document" FACET = "http://opds-spec.org/facet" ACQUISITION = "http://opds-spec.org/acquisition" THUMBNAIL = "http://opds-spec.org/image/thumbnail" IMAGE = "http://opds-spec.org/image" STREAM = "http://vaemendis.net/opds-pse/stream" SORT_NEW = "http://opds-spec.org/sort/new" POPULAR = "http://opds-spec.org/sort/popular" FEATURED = "http://opds-spec.org/featured" PROGRESSION = "http://www.cantook.com/api/progression" SELF = "self" START = "start" UP = "up" PREV = "previous" NEXT = "next" ALTERNATE = "alternate" SUB = "subsection" SEARCH = "search" REGISTER = "register" FIRST = "first" LAST = "last" TOP = "top" class MimeType: """Mime Types.""" ATOM = "application/atom+xml" _PROFILE_CATALOG = "profile=opds-catalog" DIVINA = "application/divina+json" NAV = f"{ATOM};{_PROFILE_CATALOG};kind=navigation" ACQUISITION = f"{ATOM};{_PROFILE_CATALOG};kind=acquisition" ENTRY_CATALOG = f"{ATOM};type=entry;{_PROFILE_CATALOG}" AUTHENTICATION = "application/opds-authentication+json" OPENSEARCH = "application/opensearchdescription+xml" STREAM = "image/jpeg" OPDS_JSON = "application/opds+json" OPDS_PUB = 
"application/opds-publication+json" PROGRESSION = "application/vnd.readium.progression+json" BOOK = "http://schema.org/Book" # COMIC = "https://schema.org/ComicStory" unused JPEG = "image/jpeg" WEBP = "image/webp" HTML = "text/html" AUTH_BASIC = "http://opds-spec.org/auth/basic" COOKIE = "cookie" FILE_TYPE_MAP: MappingProxyType[str, str] = MappingProxyType( { "CBZ": "application/vnd.comicbook+zip", "CBR": "application/vnd.comicbook+rar", "CBT": "application/vnd.comicbook+tar", "CB7": "application/vnd.comicbook+7zip", "PDF": "application/pdf", } ) SIMPLE_FILE_TYPE_MAP: MappingProxyType[str, str] = MappingProxyType( { # PocketBooks needs app/zip "CBZ": "application/zip", "CBR": "application/x-rar-compressed", "CBT": "application/x-tar", "CB7": "application/x-7z-compressed", "PDF": "application/pdf", } ) OCTET = "application/octet-stream" class UserAgentNames: """Control whether to hack in facets with nav links.""" CLIENT_REORDERS = frozenset({"Chunky"}) FACET_SUPPORT = frozenset({"yar"}) # kybooks SIMPLE_DOWNLOAD_MIME_TYPES = frozenset({"PocketBook Reader"}) REQUIRE_ABSOLUTE_URL = frozenset() class TopRoutes: """Routes for top groups.""" ROOT = MappingProxyType({"group": "r", "pks": (0,), "page": 1}) PUBLISHER = MappingProxyType({**ROOT, "group": "p"}) SERIES = MappingProxyType({**ROOT, "group": "s"}) FOLDER = MappingProxyType({**ROOT, "group": "f"}) STORY_ARC = MappingProxyType({**ROOT, "group": "a"}) ================================================ FILE: codex/views/opds/error.py ================================================ """Custom Http Error Views.""" from django.http import HttpRequest from django.http.response import HttpResponseRedirect, JsonResponse from django.shortcuts import redirect from django.urls import reverse from rest_framework import status from rest_framework.response import Response from rest_framework.views import exception_handler from codex.views.exceptions import SeeOtherRedirectError from codex.views.opds.authentication.v1 import OPDSAuthentication1View OPDS_PATH_PREFIX = "opds/v" _OPDS_V2_PATH_PREFIX = OPDS_PATH_PREFIX + "2" _RESET_TOP_GROUP_QUERY = "?topGroup=p" _OPDS_REDIRECT_TO_AUTH_CODES = frozenset({status.HTTP_401_UNAUTHORIZED}) _OPDS_REDIRECT_TO_TOP_CODES = frozenset( { status.HTTP_400_BAD_REQUEST, status.HTTP_403_FORBIDDEN, status.HTTP_404_NOT_FOUND, status.HTTP_405_METHOD_NOT_ALLOWED, status.HTTP_410_GONE, status.HTTP_414_REQUEST_URI_TOO_LONG, } ) def _get_url_name(request: HttpRequest, name_suffix: str) -> str: version = "2" if _OPDS_V2_PATH_PREFIX in request.path else "1" return f"opds:v{version}:{name_suffix}" def _get_redirect_to_start_response(request: HttpRequest) -> HttpResponseRedirect: """Get a redirect to the start.""" name = _get_url_name(request, "start") url = reverse(name) return redirect(url, permanent=False) def codex_opds_exception_handler( exc, context ) -> JsonResponse | None | HttpResponseRedirect | Response: """ Assume OPDS clients want redirects instead of errors. Except for Authentication which requires inline auth JSON. 
""" response = None request = context.get("request") if isinstance(exc, SeeOtherRedirectError): name = _get_url_name(request, "feed") response = exc.get_response(name) elif status_code := getattr(exc, "status_code", None): if status_code == status.HTTP_403_FORBIDDEN: # Codex presents 403 sometimes when it should be presenting 401 status_code = status.HTTP_401_UNAUTHORIZED if status_code in _OPDS_REDIRECT_TO_AUTH_CODES: response = OPDSAuthentication1View.static_get(request, status_code) elif ( not request.path.endswith("progression") and status_code in _OPDS_REDIRECT_TO_TOP_CODES ): response = _get_redirect_to_start_response(request) if not response: response = exception_handler(exc, context) return response ================================================ FILE: codex/views/opds/feed.py ================================================ """OPDS Browser View.""" from collections.abc import Sequence from rest_framework.throttling import BaseThrottle, ScopedRateThrottle from codex.views.browser.browser import BrowserView from codex.views.mixins import UserActiveMixin from codex.views.opds.settings import OPDSBrowserSettingsMixin class OPDSBrowserView(OPDSBrowserSettingsMixin, UserActiveMixin, BrowserView): """OPDS Browser View.""" throttle_classes: Sequence[type[BaseThrottle]] = (ScopedRateThrottle,) throttle_scope = "opds" def __init__(self, *args, **kwargs) -> None: """Add User Agent Name.""" super().__init__(*args, **kwargs) self._user_agent_name: str | None = None # pyright: ignore[reportIncompatibleUnannotatedOverride] ================================================ FILE: codex/views/opds/metadata.py ================================================ """OPDS Metadata Subqueries.""" from collections.abc import Iterable, Sequence from django.db.models import F from django.db.models.query import QuerySet from codex.models import ( Credit, CreditPerson, ) from codex.views.auth import GroupACLMixin from codex.views.opds.const import OPDS_M2M_MODELS ################# # M2M QuerySets # ################# # These M2M queries could techinally be added to the main query, but # probably only if the desired output format was already known. Like a # json object of model_name-pk, name for opds v1 and a json list of # strings for opds v2. Its actually better from a code standpoint to # not optimize the query like that, I think, and send the several # querysets to the views and templates. 
def get_credit_people(comic_pks: Sequence[int], roles: Iterable[str], *, exclude: bool): """Get credits that are not authors.""" people = CreditPerson.objects.filter( credit__comic__in=comic_pks, ) if exclude: people = people.exclude(credit__role__name__in=roles) else: people = people.filter(credit__role__name__in=roles) return people.distinct().only("name") def get_credits( comic_pks: Sequence[int], roles: Iterable[str], *, exclude: bool ) -> QuerySet: """Get credits that are not part of other roles.""" credit_qs = Credit.objects.filter(comic__in=comic_pks) if exclude: credit_qs = credit_qs.exclude(role__name__in=roles) else: credit_qs = credit_qs.filter(role__name__in=roles) return credit_qs.annotate(name=F("person__name"), role_name=F("role__name")) def get_m2m_objects(pks: Sequence[int]) -> dict: """Get Category labels.""" cats = {} for model in OPDS_M2M_MODELS: table = model.__name__.lower() rel = GroupACLMixin.get_rel_prefix(model) comic_filter = {rel + "in": pks} qs = model.objects.filter(**comic_filter).only("name").order_by("name") cats[table] = qs return cats ================================================ FILE: codex/views/opds/opensearch/__init__.py ================================================ """OpenSearch Views.""" ================================================ FILE: codex/views/opds/opensearch/v1.py ================================================ """Serve an opensearch v1 document.""" from collections.abc import Sequence from drf_spectacular.types import OpenApiTypes from drf_spectacular.utils import extend_schema from rest_framework.throttling import BaseThrottle, ScopedRateThrottle from codex.views.opds.auth import OPDSAuthMixin from codex.views.template import CodexAPIView, CodexXMLTemplateMixin @extend_schema(responses={("200", "application/xml"): OpenApiTypes.BYTE}) class OpenSearch1View(OPDSAuthMixin, CodexXMLTemplateMixin, CodexAPIView): # pyright: ignore[reportIncompatibleVariableOverride] """OpenSearchView.""" template_name = "opds_v1/opensearch_v1.xml" content_type = "application/xml" throttle_classes: Sequence[type[BaseThrottle]] = (ScopedRateThrottle,) throttle_scope = "opensearch" ================================================ FILE: codex/views/opds/settings.py ================================================ """OPDS Browser mixins.""" from abc import ABC from codex.models.settings import ClientChoices from codex.views.opds.auth import OPDSAuthMixin class OPDSSettingsMixin(OPDSAuthMixin, ABC): """OPDS View isolates OPDS settings data.""" CLIENT = ClientChoices.OPDS BROWSER_CLIENT = ClientChoices.OPDS class OPDSBrowserSettingsMixin(OPDSSettingsMixin): """OPDS Browser Settings Mixin.""" ================================================ FILE: codex/views/opds/start.py ================================================ """Common mixin for OPDS Start Page Views.""" from collections.abc import MutableMapping from typing import Any class OPDSStartViewMixin: """Common mixin for OPDS Start Page Views.""" IS_START_PAGE = True def init_params(self) -> MutableMapping[str, Any]: """Hard reset settings to default just by landing on the page.""" return self.get_browser_default_params() # pyright: ignore[reportAttributeAccessIssue], #ty: ignore[unresolved-attribute] def _get_group_queryset(self) -> tuple: """Force empty group query on start page.""" qs = self.model.objects.none().order_by("pk") # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] count = 0 return qs, count ================================================ FILE: 
codex/views/opds/urls.py ================================================ """OPDS URLs API for popup.""" from django.urls import reverse from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.choices.browser import DEFAULT_BROWSER_ROUTE from codex.serializers.opds.urls import OPDSURLsSerializer from codex.views.auth import AuthGenericAPIView from codex.views.util import pop_name _OPDS_VERSIONS = (1, 2) class OPDSURLsView(AuthGenericAPIView): """OPDS URLs.""" serializer_class: type[BaseSerializer] | None = OPDSURLsSerializer def get(self, *args, **kwargs) -> Response: """Resolve the urls.""" obj = {} route = DEFAULT_BROWSER_ROUTE route = pop_name(route) for version in _OPDS_VERSIONS: key = f"v{version}" name = f"opds:v{version}:start" value = reverse(name, kwargs=dict(route)) obj[key] = value serializer = self.get_serializer(obj) return Response(serializer.data) ================================================ FILE: codex/views/opds/user_agent.py ================================================ """OPDS Get User Agent.""" from rest_framework.request import Request def get_user_agent_name(request: Request) -> str: """Parse User Agent Name from Request.""" if (user_agent := request.headers.get("User-Agent")) and ( user_agent_parts := user_agent.split("/", 1) ): user_agent_name = user_agent_parts[0] else: user_agent_name = "" return user_agent_name ================================================ FILE: codex/views/opds/v1/__init__.py ================================================ """OPDS v1 Views.""" ================================================ FILE: codex/views/opds/v1/const.py ================================================ """OPDS v1 const and data classes.""" import json from collections.abc import Mapping from dataclasses import dataclass from datetime import datetime from types import MappingProxyType from codex.views.opds.const import MimeType, Rel, TopRoutes DEFAULT_FACETS = MappingProxyType( { "topGroup": "p", "orderBy": "sort_name", "orderReverse": "false", } ) @dataclass class TopLink: """A non standard root link when facets are unsupported.""" kwargs: Mapping rel: str mime_type: str query_params: Mapping[str, str | bool | int] glyph: str title: str desc: str url_name: str = "opds:v1:feed" class TopLinks: """Top link definitions.""" START = TopLink( {}, # TopRoutes.ROOT, Rel.START, MimeType.NAV, {}, # {"topGroup": "p"}, "⌂", "Start of catalog", "", "opds:v1:start", ) ALL = (START,) class RootTopLinks: """Top Links that only appear at the root.""" KEEP_READING = TopLink( TopRoutes.SERIES, Rel.FEATURED, MimeType.NAV, { "topGroup": "s", "filters": json.dumps({"bookmark": "UNREAD"}), "orderBy": "bookmark_updated_at", "orderReverse": True, }, "👀", "Keep Reading", "Unread issues, recently read first.", ) NEW_UNREAD = TopLink( TopRoutes.SERIES, Rel.SORT_NEW, MimeType.ACQUISITION, { "topGroup": "s", "orderBy": "created_at", "orderReverse": True, }, "📥", "Latest Unread", "Unread issues, latest added first.", ) OLD_UNREAD = TopLink( TopRoutes.SERIES, Rel.POPULAR, MimeType.NAV, { "topGroup": "s", "filters": json.dumps({"bookmark": "UNREAD"}), "orderBy": "date", }, "📚", "Oldest Unread", "Unread issues, oldest published first", ) ALL = (KEEP_READING, NEW_UNREAD, OLD_UNREAD) @dataclass class OPDS1Link: """An OPDS Link.""" rel: str href: str mime_type: str title: str = "" length: int = 0 facet_group: str = "" facet_active: bool = False thr_count: int = 0 pse_count: int = 0 pse_last_read: int = 0 pse_last_read_date: datetime | None = None 
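# thr_count carries the child/issue count for nav links; the pse_* fields
# back the vaemendis.net page-stream extension link (Rel.STREAM) built in
# _stream_link() and are only set for comic acquisition entries.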
@dataclass
class FacetGroup:
    """An opds:facetGroup."""

    title_prefix: str
    query_param: str
    glyph: str
    facets: tuple


@dataclass
class Facet:
    """An OPDS facet."""

    value: str
    title: str


class FacetGroups:
    """Facet Group definitions."""

    ORDER_BY = FacetGroup(
        "Order By",
        "orderBy",
        "➠",
        (
            Facet("date", "Date"),
            Facet("sort_name", "Name"),
            Facet("filename", "Filename"),
        ),
    )
    ORDER_REVERSE = FacetGroup(
        "Order",
        "orderReverse",
        "⇕",
        (Facet("false", "Ascending"), Facet("true", "Descending")),
    )
    ALL = (ORDER_BY, ORDER_REVERSE)


class RootFacetGroups:
    """Facet Groups that only appear at the root."""

    TOP_GROUP = FacetGroup(
        "",
        "topGroup",
        "⊙",
        (
            Facet("p", "Publishers View"),
            Facet("s", "Series View"),
            Facet("f", "Folder View"),
            Facet("a", "Story Arc View"),
        ),
    )
    ALL = (TOP_GROUP,)


class OpdsNs:
    """XML Namespaces."""

    CATALOG = "http://opds-spec.org/2010/catalog"
    ACQUISITION = "http://opds-spec.org/2010/acquisition"


@dataclass
class OPDS1EntryObject:
    """Fake entry db object for top link & facet entries."""

    group: str = ""
    ids: frozenset[int] = frozenset()
    name: str = ""
    summary: str = ""
    fake: bool = True
    url_name: str = "opds:v1:feed"


@dataclass
class OPDS1EntryData:
    """Entry Data class to avoid too many args."""

    acquisition_groups: frozenset
    zero_pad: int
    metadata: bool
    mime_type_map: Mapping[str, str]


================================================
FILE: codex/views/opds/v1/entry/__init__.py
================================================
"""OPDS v1 Entries."""


================================================
FILE: codex/views/opds/v1/entry/entry.py
================================================
"""OPDS v1 Entry."""

import json
from contextlib import suppress
from datetime import UTC, datetime

from dateutil import parser
from django.urls import reverse
from loguru import logger

from codex.models import Comic
from codex.views.opds.const import (
    AUTHOR_ROLES,
    BLANK_TITLE,
    TopRoutes,
)
from codex.views.opds.metadata import (
    get_credit_people,
    get_m2m_objects,
)
from codex.views.opds.v1.entry.links import OPDS1EntryLinksMixin


class OPDS1Entry(OPDS1EntryLinksMixin):
    """An OPDS entry object."""

    @property
    def id_tag(self) -> str:
        """GUID is a nav url."""
        # Id top links by query params but not regular entries.
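        # (fake top-link entries share routes, so their query strings are
        # what keep the GUIDs distinct; real entries get unique nav urls)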
return self._nav_href(metadata=self.metadata) @property def title(self) -> str: """Compute the item title.""" result = "" try: parts = [] group = self.obj.group if not self.fake: if group == "i": parts.append(self.obj.publisher_name) elif group == "v": parts.append(self.obj.series_name) elif group == "c": title = Comic.get_title( self.obj, volume=True, name=True, filename_fallback=self.title_filename_fallback, zero_pad=self.zero_pad, ) parts.append(title) if group != "c" and (name := self.obj.name): parts.append(name) result = " ".join(filter(None, parts)) except Exception: logger.exception("Getting OPDS1 title") if not result: result = BLANK_TITLE return result @property def issued(self) -> str: """Return the published date.""" date = "" if self.obj.group == "c": with suppress(Exception): date = self.obj.date.isoformat() return date @property def publisher(self): """Return the publisher.""" return self.obj.publisher_name def _get_datefield(self, key) -> datetime | None: result = None if not self.fake and (value := getattr(self.obj, key, None)): try: if isinstance(value, str): result = parser.parse(value) if isinstance(value, datetime): result = value.astimezone(UTC).isoformat() except ValueError: pass return result # pyright: ignore[reportReturnType] @property def updated(self) -> datetime | None: """When the entry was last updated.""" return self._get_datefield("updated_at") @property def published(self) -> datetime | None: """When the entry was created.""" return self._get_datefield("created_at") @property def language(self): """Return the entry language.""" return self.obj.language @property def summary(self): """Return a child count or comic summary.""" if self.obj.group == "c": desc = self.obj.summary else: children = self.obj.child_count desc = f"{children} issues" return desc @staticmethod def _add_url_to_obj(objs, filter_key) -> list: """Add filter urls to objects.""" result = [] for obj in objs: filters = json.dumps({filter_key: [obj.pk]}) query = {"filters": filters} obj.url = reverse( "opds:v1:feed", kwargs=dict(TopRoutes.SERIES), query=query ) result.append(obj) return result @property def authors(self) -> list: """Get Author names.""" if not self.metadata: return [] people = get_credit_people(self.obj.ids, AUTHOR_ROLES, exclude=False) return self._add_url_to_obj(people, "credits") @property def contributors(self) -> list: """Get Credit names.""" if not self.metadata: return [] people = get_credit_people(self.obj.ids, AUTHOR_ROLES, exclude=True) return self._add_url_to_obj(people, "credits") @property def category_groups(self) -> dict: """Get Category labels.""" if not self.metadata: return {} return get_m2m_objects(self.obj.ids) ================================================ FILE: codex/views/opds/v1/entry/links.py ================================================ """OPDS v1 Entry Links Methods.""" from collections.abc import Mapping from datetime import datetime from math import floor from urllib.parse import quote_plus from comicbox.box import Comicbox from django.urls import reverse from loguru import logger from codex.settings import COMICBOX_CONFIG from codex.views.opds.const import MimeType, Rel from codex.views.opds.v1.const import OPDS1EntryData, OPDS1EntryObject, OPDS1Link class OPDS1EntryLinksMixin: """OPDS v1 Entry Links Methods.""" def __init__( self, obj, query_params: Mapping, data: OPDS1EntryData, *, title_filename_fallback: bool, ) -> None: """Initialize params.""" self.obj = obj self.fake = isinstance(self.obj, OPDS1EntryObject) self.query_params = 
query_params self.acquisition_groups = data.acquisition_groups self.zero_pad = data.zero_pad self.metadata = data.metadata self.mime_type_map = data.mime_type_map self.title_filename_fallback = title_filename_fallback def _cover_link(self, rel) -> OPDS1Link | None: if self.fake: return None try: kwargs = {"group": self.obj.group, "pks": self.obj.ids} ts = floor(datetime.timestamp(self.obj.updated_at)) query_params = { "customCovers": True, "dynamicCovers": False, "ts": ts, } href = reverse("opds:bin:cover", kwargs=kwargs, query=query_params) return OPDS1Link(rel, href, MimeType.WEBP) except Exception: logger.exception("create thumb") def _nav_href(self, *, metadata: bool) -> str: try: if self.obj.group: pks = sorted(self.obj.ids) kwargs = {"group": self.obj.group, "pks": pks, "page": 1} else: kwargs = {} qps = {} qps.update(self.query_params) if ( self.obj.group == "a" and self.obj.ids and 0 not in self.obj.ids and not self.query_params.get("orderBy") ): # story arcs get ordered by story_arc_number by default qps.update({"orderBy": "story_arc_number"}) if metadata: qps.update({"opdsMetadata": 1}) url_name = getattr(self.obj, "url_name", "opds:v1:feed") return reverse(url_name, kwargs=kwargs, query=qps) except Exception: msg = f"creating nav href for entry {self.obj}" logger.exception(msg) raise def _nav_link(self, *, metadata: bool) -> OPDS1Link: href = self._nav_href(metadata=metadata) group = self.obj.group if group in self.acquisition_groups: mime_type = MimeType.ENTRY_CATALOG if metadata else MimeType.ACQUISITION else: mime_type = MimeType.NAV thr_count = ( 0 if self.fake else 1 if self.obj.group == "c" else self.obj.child_count ) rel = Rel.ALTERNATE if metadata else "subsection" return OPDS1Link(rel, href, mime_type, thr_count=thr_count) def _download_link(self) -> OPDS1Link | None: pk = self.obj.pk if not pk: return None fn = quote_plus(self.obj.get_filename()) kwargs = {"pk": pk, "filename": fn} href = reverse("opds:bin:download", kwargs=kwargs) mime_type = self.mime_type_map.get(self.obj.file_type, MimeType.OCTET) return OPDS1Link(Rel.ACQUISITION, href, mime_type, length=self.obj.size) def lazy_metadata(self) -> bool: """Get barebones metadata lazily to make pse work for chunky-like readers.""" if self.obj.page_count and self.obj.file_type: return False with Comicbox(self.obj.path, config=COMICBOX_CONFIG, logger=logger) as cb: self.obj.page_count = cb.get_page_count() self.obj.file_type = cb.get_file_type() logger.debug(f"Got lazy opds pse metadata for {self.obj.path}") return True def _stream_link(self) -> OPDS1Link | None: pk = self.obj.pk if not pk: return None kwargs = {"pk": pk, "page": 0} qps = {"bookmark": 1} href = reverse("opds:bin:page", kwargs=kwargs, query=qps) href = href.replace("0/page.jpg", "{pageNumber}/page.jpg") page = self.obj.page # extra stupid pse chunky fix for no metadata self.lazy_metadata() pse_count = self.obj.page_count bookmark_updated_at = self.obj.bookmark_updated_at return OPDS1Link( Rel.STREAM, href, MimeType.STREAM, pse_count=pse_count, pse_last_read=page, pse_last_read_date=bookmark_updated_at, ) def _links_comic(self) -> list: """Links for comics.""" result = [] if download := self._download_link(): result += [download] if stream := self._stream_link(): result += [stream] if not self.metadata and (metadata := self._nav_link(metadata=True)): result += [metadata] return result @property def links(self) -> list: """Create all entry links.""" result = [] try: if thumb := self._cover_link(Rel.THUMBNAIL): result += [thumb] if image := 
self._cover_link(Rel.IMAGE): result += [image] if self.obj.group == "c" and not self.fake: result += self._links_comic() elif nav := self._nav_link(metadata=False): result += [nav] except Exception: msg = f"Getting entry links for {self.obj}" logger.exception(msg) return result ================================================ FILE: codex/views/opds/v1/facets.py ================================================ """OPDS v1 Facets methods.""" from types import MappingProxyType from typing import Any from django.urls import reverse from codex.choices.admin import AdminFlagChoices from codex.models import AdminFlag from codex.views.opds.const import MimeType, Rel, UserAgentNames from codex.views.opds.feed import OPDSBrowserView from codex.views.opds.v1.const import ( DEFAULT_FACETS, FacetGroups, OPDS1EntryData, OPDS1EntryObject, OPDS1Link, RootFacetGroups, ) from codex.views.opds.v1.entry.entry import OPDS1Entry from codex.views.template import CodexXMLTemplateMixin from codex.views.util import pop_name class OPDS1FacetsView(CodexXMLTemplateMixin, OPDSBrowserView): """OPDS 1 Facets methods.""" TARGET = "opds1" IS_START_PAGE: bool = False def __init__(self, *args, **kwargs) -> None: """Initialize properties.""" super().__init__(*args, **kwargs) self._user_agent_name: str | None = None self._mime_type_map: MappingProxyType[str, str] | None = None self._use_facets: bool | None = None self._obj: MappingProxyType[str, Any] | None = None @property def mime_type_map(self) -> MappingProxyType[str, str]: """Memoize mime type map.""" if self._mime_type_map is None: self._mime_type_map = ( MimeType.SIMPLE_FILE_TYPE_MAP if self.user_agent_name in UserAgentNames.SIMPLE_DOWNLOAD_MIME_TYPES else MimeType.FILE_TYPE_MAP ) return self._mime_type_map @property def use_facets(self) -> bool: """Memoize use_facets.""" if self._use_facets is None: self._use_facets = self.user_agent_name in UserAgentNames.FACET_SUPPORT return self._use_facets @property def obj(self) -> MappingProxyType[str, Any]: """Get the browser page and serialize it for this subclass.""" if self._obj is None: group_qs, book_qs, num_pages, total_count, zero_pad, mtime = ( self._get_group_and_books() ) book_qs = book_qs.select_related("series", "volume", "language") title = self.get_browser_page_title() self._obj = MappingProxyType( { "title": title, "groups": group_qs, "books": book_qs, "zero_pad": zero_pad, "num_pages": num_pages, "total_count": total_count, "mtime": mtime, } ) return self._obj def _facet(self, kwargs, facet_group, facet_title, new_query_params) -> OPDS1Link: kwargs = pop_name(kwargs) facet_active = False for key, val in new_query_params.items(): if self.request.GET.get(key) == val: facet_active = True break query = {} query.update(self.request.GET) query.update(new_query_params) href = reverse("opds:v1:feed", kwargs=dict(kwargs), query=query) title = " ".join(filter(None, (facet_group.title_prefix, facet_title))).strip() return OPDS1Link( Rel.FACET, href, MimeType.NAV, title=title, facet_group=facet_group.query_param, facet_active=facet_active, ) def _facet_entry(self, item, facet_group, facet, query_params) -> OPDS1Entry: name = " ".join( filter(None, (facet_group.glyph, facet_group.title_prefix, facet.title)) ).strip() entry_obj = OPDS1EntryObject( group=item.get("group"), ids=item.get("pks"), name=name, ) qps = {**self.request.GET} qps.update(query_params) zero_pad: int = self.obj["zero_pad"] data = OPDS1EntryData( self.opds_acquisition_groups, zero_pad, metadata=False, mime_type_map=self.mime_type_map, ) return 
OPDS1Entry(entry_obj, qps, data, title_filename_fallback=False) def _is_facet_active(self, facet_group, facet) -> bool: compare = [facet.value] default_val = DEFAULT_FACETS.get(facet_group.query_param) if facet.value == default_val: compare += [None] return self.request.GET.get(facet_group.query_param) in compare @staticmethod def _did_special_group_change(group, facet_group) -> bool: """Test if one of the special groups changed.""" for test_group in ("f", "a"): if (group == test_group and facet_group != test_group) or ( group != test_group and facet_group == test_group ): return True return False def _facet_or_facet_entry(self, facet_group, facet, *, entries: bool): # This logic preempts facet:activeFacet but no one uses it. group = self.kwargs.get("group") if facet_group.query_param == "topGroup" and self._did_special_group_change( group, facet.value ): kwargs = {"group": facet.value, "pks": {}, "page": 1} else: kwargs = self.kwargs qps = {facet_group.query_param: facet.value} if entries: facet = self._facet_entry(kwargs, facet_group, facet, qps) else: facet = self._facet(kwargs, facet_group, facet.title, qps) return facet def _facet_group(self, facet_group, *, entries: bool) -> list: facets = [] for facet in facet_group.facets: if facet.value == "f": efv_flag = ( AdminFlag.objects.only("on") .get(key=AdminFlagChoices.FOLDER_VIEW.value) .on ) if not efv_flag: continue if facet_obj := self._facet_or_facet_entry( facet_group, facet, entries=entries ): facets += [facet_obj] return facets def facets(self, *, entries: bool) -> list: """Return facets.""" facets = [] if self.IS_START_PAGE: facets += self._facet_group(RootFacetGroups.TOP_GROUP, entries=entries) else: group = self.kwargs.get("group") if ( group != "c" and self.user_agent_name not in UserAgentNames.CLIENT_REORDERS ): facets += self._facet_group(FacetGroups.ORDER_BY, entries=entries) facets += self._facet_group(FacetGroups.ORDER_REVERSE, entries=entries) return facets ================================================ FILE: codex/views/opds/v1/feed.py ================================================ """OPDS v1 feed.""" from collections.abc import Sequence from typing import TYPE_CHECKING, Any, override from drf_spectacular.utils import extend_schema from loguru import logger from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from rest_framework.throttling import BaseThrottle, ScopedRateThrottle from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.scribe.tasks import LazyImportComicsTask from codex.serializers.browser.settings import OPDSSettingsSerializer from codex.serializers.opds.v1 import OPDS1TemplateSerializer from codex.settings import BROWSER_MAX_OBJ_PER_PAGE, FALSY from codex.version import VERSION from codex.views.opds.const import BLANK_TITLE from codex.views.opds.start import OPDSStartViewMixin from codex.views.opds.v1.const import OPDS1EntryData, OpdsNs, RootTopLinks from codex.views.opds.v1.entry.entry import OPDS1Entry from codex.views.opds.v1.links import OPDS1LinksView if TYPE_CHECKING: from collections.abc import Mapping class OPDS1FeedView(OPDS1LinksView): """OPDS 1 Feed.""" template_name = "opds_v1/index.xml" serializer_class: type[BaseSerializer] | None = OPDS1TemplateSerializer input_serializer_class: type[OPDSSettingsSerializer] = OPDSSettingsSerializer # pyright: ignore[reportIncompatibleVariableOverride] throttle_classes: Sequence[type[BaseThrottle]] = (ScopedRateThrottle,) throttle_scope = "opds" @property def version(self): """Codex 
version.""" return VERSION @property def opds_ns(self): """Dynamic opds namespace.""" try: return OpdsNs.ACQUISITION if self.is_opds_acquisition else OpdsNs.CATALOG except Exception: logger.exception("Getting OPDS v1 namespace") @property def is_acquisition(self) -> bool: """Is acquisition.""" return self.is_opds_acquisition @property def id_tag(self): """Feed id is the url.""" try: return self.request.build_absolute_uri() except Exception: logger.exception("Getting OPDS v1 ID Tag") @property def title(self) -> str: """Create the feed title.""" result = "" try: browser_title: Mapping[str, Any] = self.obj.get("title", {}) if browser_title: parent_name = browser_title.get("parent_name", "All") pks = self.kwargs["pks"] if not parent_name and not pks: parent_name = "All" group_name = browser_title.get("group_name") result = " ".join(filter(None, (parent_name, group_name))).strip() if not result: result = BLANK_TITLE except Exception: logger.exception("Getting OPDS v1 feed title") return result @property def updated(self) -> str: """Use mtime for updated.""" datestr = "" try: mtime = self.obj.get("mtime") if mtime: datestr = mtime.isoformat() except Exception: logger.exception("Getting OPDS v1 updated") return datestr @property def items_per_page(self) -> int | None: """Return opensearch:itemsPerPage.""" try: if self.params.get("search"): return BROWSER_MAX_OBJ_PER_PAGE except Exception: logger.exception("Getting OPDS v1 items per page") @property def total_results(self): """Return opensearch:totalResults.""" try: if self.params.get("search"): return self.obj.get("total_count", 0) except Exception: logger.exception("Getting OPDS v1 total results") def _get_entries_section(self, key, metadata) -> list: """Get entries by key section.""" entries = [] if objs := self.obj.get(key): zero_pad: int = self.obj["zero_pad"] data = OPDS1EntryData( self.opds_acquisition_groups, zero_pad, metadata, self.mime_type_map ) fallback = bool(self.admin_flags.get("folder_view")) import_pks = set() for obj in objs: entry = OPDS1Entry( obj, self.request.GET, data, title_filename_fallback=fallback, ) if key == "books" and entry.lazy_metadata(): import_pks.add(obj.pk) entries.append(entry) if import_pks: task = LazyImportComicsTask(group="c", pks=frozenset(import_pks)) LIBRARIAN_QUEUE.put(task) return entries @property def entries(self) -> list: """Create all the entries.""" entries = [] try: # if not self.use_facets: # and self.kwargs.get("page") == 1: facet_entries = not self.use_facets if self.IS_START_PAGE: entries += self.add_top_links(RootTopLinks.ALL) else: entries += self.add_start_link() entries += self.facets(entries=facet_entries) if not self.IS_START_PAGE: entries += self._get_entries_section("groups", metadata=False) metadata = self.request.GET.get("opdsMetadata", "").lower() not in FALSY entries += self._get_entries_section("books", metadata) except Exception: logger.exception("Getting OPDS v1 entries") return entries @override @extend_schema(parameters=[input_serializer_class]) def get(self, *_args, **_kwargs) -> Response: """Get the feed.""" serializer = self.get_serializer(self) self.mark_user_active() return Response(serializer.data, content_type=self.content_type) class OPDS1StartView(OPDSStartViewMixin, OPDS1FeedView): """OPDS v1 Start Page.""" @override @extend_schema( parameters=[OPDS1FeedView.input_serializer_class], operation_id="opds_1.2_start_retrieve", ) def get(self, *args, **kwargs) -> Response: return super().get(*args, **kwargs) ================================================ FILE: 
codex/views/opds/v1/links.py ================================================ """OPDS v1 Links methods.""" from django.urls import reverse from loguru import logger from codex.views.opds.const import MimeType, Rel from codex.views.opds.v1.const import ( OPDS1EntryData, OPDS1EntryObject, OPDS1Link, RootTopLinks, TopLink, TopLinks, ) from codex.views.opds.v1.entry.entry import OPDS1Entry from codex.views.opds.v1.facets import OPDS1FacetsView from codex.views.util import pop_name class OPDS1LinksView(OPDS1FacetsView): """OPDS 1 Links methods.""" def is_top_link_displayed(self, top_link) -> bool: """Determine if this top link should be displayed.""" for key, value in top_link.kwargs.items(): if str(self.kwargs.get(key)) != str(value): return False for key, value in top_link.query_params.items(): if str(self.request.GET.get(key)) != str(value): return False return True def _link( self, kwargs, rel, query_params=None, mime_type=MimeType.NAV ) -> OPDS1Link: """Create a link.""" if query_params is None: query_params = self.request.GET kwargs = pop_name(kwargs) href = reverse("opds:v1:feed", kwargs=dict(kwargs), query=query_params) return OPDS1Link(rel, href, mime_type) def _top_link(self, top_link): """Create a link from a top link.""" return self._link( top_link.kwargs, top_link.rel, top_link.query_params, top_link.mime_type ) def _root_links(self) -> list: """Navigation Root Links.""" links = [] if ( (up_route := self.get_last_route()) and (pks := up_route.get("pks", [])) and 0 not in pks ): links += [self._link(up_route, Rel.UP)] page = self.kwargs.get("page", 1) if page > 1: prev_route = {**self.kwargs, "page": page - 1} links += [self._link(prev_route, Rel.PREV)] if page < self.obj.get("num_pages", 1): next_route = {**self.kwargs, "page": page + 1} links += [self._link(next_route, Rel.NEXT)] return links def _links_start_page_links(self) -> list: links = [] if not self.IS_START_PAGE: return links links += [ OPDS1Link(Rel.ALTERNATE, reverse("opds:v2:start"), MimeType.OPDS_JSON) ] return links def _links_facets(self) -> list: links = [] if not self.use_facets: return links for top_link in TopLinks.ALL + RootTopLinks.ALL: if not self.is_top_link_displayed(top_link): links += [self._top_link(top_link)] if facets := self.facets(entries=False): links += facets return links @property def links(self) -> list: """Create all the links.""" links = [] try: self_mime_type = ( MimeType.ACQUISITION if self.is_opds_acquisition else MimeType.NAV ) links += [ OPDS1Link("self", self.request.get_full_path(), self_mime_type), OPDS1Link( Rel.AUTHENTICATION, reverse("opds:auth:v1"), MimeType.AUTHENTICATION, ), OPDS1Link(Rel.START, reverse("opds:v1:start"), MimeType.NAV), OPDS1Link( Rel.SEARCH, reverse("opds:v1:opensearch_v1"), MimeType.OPENSEARCH ), ] links += self._links_start_page_links() links += self._root_links() links += self._links_facets() except Exception: logger.exception("Getting OPDS v1 links") return links def _top_link_entry(self, top_link) -> OPDS1Entry: """Create an entry instead of a facet.""" name = " ".join(filter(None, (top_link.glyph, top_link.title))) entry_obj = OPDS1EntryObject( group=top_link.kwargs["group"], ids=top_link.kwargs["pks"], name=name, summary=top_link.desc, ) zero_pad: int = self.obj["zero_pad"] data = OPDS1EntryData( self.opds_acquisition_groups, zero_pad, metadata=False, mime_type_map=self.mime_type_map, ) return OPDS1Entry( entry_obj, top_link.query_params, data, title_filename_fallback=False ) def add_start_link(self) -> list[OPDS1Entry]: """Add the start link.""" top_link:
TopLink = TopLinks.START name = " ".join(filter(None, (top_link.glyph, top_link.title))) entry_obj = OPDS1EntryObject( name=name, summary=top_link.desc, url_name=top_link.url_name ) data = OPDS1EntryData( frozenset(), 0, metadata=False, mime_type_map=self.mime_type_map ) return [OPDS1Entry(entry_obj, {}, data, title_filename_fallback=False)] def add_top_links(self, top_links) -> list: """Add a list of top links as entries if they should be enabled.""" entries = [] for tl in top_links: if not self.is_top_link_displayed(tl): entries += [self._top_link_entry(tl)] return entries ================================================ FILE: codex/views/opds/v2/__init__.py ================================================ """OPDS v2 Views.""" ================================================ FILE: codex/views/opds/v2/const.py ================================================ """OPDS v2 consts.""" # https://drafts.opds.io/opds-2.0.html from collections.abc import Mapping, Sequence from dataclasses import dataclass from types import MappingProxyType from django.db.models.query import QuerySet from codex.views.opds.const import BookmarkFilters, Rel @dataclass class HrefData: """Data for creating hrefs.""" kwargs: Mapping[str, str | Sequence[int] | int] | None = None query_params: Mapping[str, str | int | Mapping] | None = None inherit_query_params: bool = False url_name: str | None = None min_page: int | None = None max_page: int | None = None template: str = "" @dataclass class LinkData: """Data for creating links.""" rel: str href_data: HrefData title: str | None = None mime_type: str | None = None template: str | None = None height: int | None = None width: int | None = None size: int | None = None href: str | None = None num_items: int | None = None authenticate: Mapping | None = None @dataclass class Link: """Groups Navigation Link.""" rel: str title: str group: str | None = "" query_params: Mapping | None = None inherit_query_params: bool = True subtitle: str = "" @dataclass class LinkGroup: """Navigation Group.""" title: str links: Sequence[Link] | QuerySet subtitle: str = "" TOP_GROUPS = ( LinkGroup( "Top Groups", ( Link(Rel.SUB, "Publishers", "r", {"topGroup": "p"}), Link(Rel.SUB, "Series", "r", {"topGroup": "s"}), Link(Rel.SUB, "Issues", "r", {"topGroup": "c"}), Link(Rel.SUB, "Folders", "f", {"topGroup": "f"}), Link(Rel.SUB, "Story Arcs", "a", {"topGroup": "a"}), ), ), ) START_GROUPS = ( LinkGroup( "Start (Reset filters & order)", ( Link( Rel.START, "Start", None, {}, inherit_query_params=False, ), ), ), ) FACETS = ( LinkGroup( "⏿ Read Filter", ( Link(Rel.FACET, "Unread", "", {"filters": BookmarkFilters.UNREAD}), Link( Rel.FACET, "In Progress", "", {"filters": BookmarkFilters.IN_PROGRESS} ), Link(Rel.FACET, "Read", "", {"filters": BookmarkFilters.READ}), Link(Rel.FACET, "All", "", {"filters": BookmarkFilters.NONE}), ), ), LinkGroup( "⬄ Order By", ( Link(Rel.FACET, "Date", "", {"orderBy": "date"}), Link(Rel.FACET, "Name", "", {"orderBy": "sort_name"}), Link(Rel.FACET, "Filename", "", {"orderBy": "filename"}), ), ), LinkGroup( "⇕ Order Direction", ( Link(Rel.FACET, "Ascending", "", {"orderReverse": False}), Link(Rel.FACET, "Descending", "", {"orderReverse": True}), ), ), ) _PREVIEW_GROUP_PARAMS = MappingProxyType( { "filters": BookmarkFilters.UNREAD, } ) PREVIEW_GROUPS = ( LinkGroup( "Ordered Groups", ( Link( Rel.FEATURED, "Keep Reading", "r", MappingProxyType( { "topGroup": "c", **_PREVIEW_GROUP_PARAMS, "orderBy": "bookmark_updated_at", "orderReverse": True, "title": "Keep Reading", } ), ), Link( 
Rel.SORT_NEW, "Latest Unread", "r", MappingProxyType( { "topGroup": "c", **_PREVIEW_GROUP_PARAMS, "orderBy": "created_at", "orderReverse": True, "title": "Latest Unread", } ), ), Link( Rel.SORT_NEW, "Oldest Unread", "r", MappingProxyType( { "topGroup": "c", **_PREVIEW_GROUP_PARAMS, "orderBy": "date", "orderReverse": False, "title": "Oldest Unread", } ), ), ), ), ) ================================================ FILE: codex/views/opds/v2/feed/__init__.py ================================================ """OPDS v2.0 Feed.""" import json import urllib.parse from collections.abc import Iterable, Mapping from datetime import datetime from types import MappingProxyType from typing import override from django.db.models import QuerySet from drf_spectacular.utils import extend_schema from rest_framework.response import Response from codex.serializers.browser.settings import OPDSSettingsSerializer from codex.serializers.opds.v2.feed import OPDS2FeedSerializer from codex.settings import BROWSER_MAX_OBJ_PER_PAGE, FALSY from codex.views.const import EPOCH_START from codex.views.opds.const import BLANK_TITLE from codex.views.opds.start import OPDSStartViewMixin from codex.views.opds.v2.feed.groups import OPDS2FeedGroupsView _ORDER_BY_SUBTITLE_MAP: MappingProxyType[str, str] = MappingProxyType( {"bookmark_updated_at": "read", "created_at": "added", "date": "published"} ) class OPDS2FeedView(OPDS2FeedGroupsView): """OPDS 2.0 Feed.""" serializer_class = OPDS2FeedSerializer input_serializer_class = OPDSSettingsSerializer IS_START_PAGE: bool = False def _subtitle_filters(self, qps: Mapping) -> list[str]: parts = [] if not ( (filters := qps.get("filters")) and (filters := urllib.parse.unquote(filters)) and (filters := json.loads(filters)) ): return parts filter_keys = [] for key, value in filters.items(): # ty: ignore[unresolved-attribute] if not value: continue if key == "bookmark": bf = "reading" if value == "IN_PROGRESS" else value.lower() parts.append(bf) else: filter_keys.append(key) if filter_keys: parts += sorted(filter_keys) return parts def _subtitle(self) -> str: """Subtitle for main feed.""" # Add filters and order parts = [] qps = self.request.GET parts += self._subtitle_filters(qps) if q := qps.get("query"): if search_query := urllib.parse.unquote(q): parts.append(search_query) if (order_by := qps.get("orderBy", "")) and order_by != "sort_name": order_by = _ORDER_BY_SUBTITLE_MAP.get(order_by, order_by) parts.append(order_by) if ( order_reverse := qps.get("orderReverse", False) ) and order_reverse not in FALSY: parts.append("desc") return ", ".join(parts) if parts else "" def _title(self, browser_title: Mapping[str, str]): """Create the feed title.""" result = self.request.GET.get("title", "") if not result and browser_title: parent_name = browser_title.get("parent_name", None) pks = self.kwargs["pks"] if not parent_name and not pks: parent_name = "All" group_name = browser_title.get("group_name") result = " ".join(filter(None, (parent_name, group_name))).strip() if not result: result = BLANK_TITLE return result def _feed_metadata(self, title: str, mtime: datetime | None) -> MappingProxyType: number_of_items = self._opds_number_of_books + self._opds_number_of_groups current_page = self.kwargs.get("page") md = { "title": title, "number_of_items": number_of_items, "items_per_page": BROWSER_MAX_OBJ_PER_PAGE, "current_page": current_page, } if mtime: md["modified"] = mtime if subtitle := self._subtitle(): md["subtitle"] = subtitle return MappingProxyType(md) def _feed_navigation_and_groups( self, 
group_qs: QuerySet, book_qs: QuerySet, zero_pad: int | None, title: str, ) -> tuple[tuple, tuple, tuple]: groups = [] navigation = [] top_groups = self.get_top_groups() if self.IS_START_PAGE: groups += self.get_ordered_groups() first_top_group = next(iter(top_groups), {}) navigation = first_top_group.get("navigation", []) publications = [] else: # Move the first group's navigation to become the feed navigation. # The feed navigation is titled "Browse" in Stump zero_pad = zero_pad or 0 regular_groups = self.get_groups(group_qs, book_qs, title, zero_pad) first_regular_group = next(iter(regular_groups), {}) navigation = first_regular_group.pop("navigation", []) groups += regular_groups groups += top_groups groups += self.get_facets() groups += self.get_start_groups() publications = first_regular_group.pop("publications", []) return tuple(navigation), tuple(groups), tuple(publications) @staticmethod def _update_feed_modified( feed_metadata: Mapping, groups: Iterable[Mapping], # noqa: ARG004 ) -> Mapping: return feed_metadata @override def get_object(self) -> MappingProxyType: """Get the browser page and serialize it for this subclass.""" group_qs, book_qs, _, _, zero_pad, mtime = self.group_and_books # convert browser_page into opds page browser_title = self.get_browser_page_title() title = "Start" if self.IS_START_PAGE else self._title(browser_title) # opds page metadata = self._feed_metadata(title, mtime) # links up_route = self.get_last_route() links = tuple(self.get_links(up_route)) # Navigation & Groups navigation, groups, publications = self._feed_navigation_and_groups( group_qs, book_qs, zero_pad, title ) metadata = self._update_feed_modified(metadata, groups) feed = { "metadata": metadata, "links": links, } if navigation: feed["navigation"] = navigation if groups: feed["groups"] = groups if publications: feed["publications"] = publications return MappingProxyType(feed) @override @extend_schema(parameters=[input_serializer_class]) def get(self, *_args, **_kwargs) -> Response: """Get the feed.""" obj = self.get_object() serializer = self.get_serializer(obj) self.mark_user_active() return Response(serializer.data) class OPDS2StartView(OPDSStartViewMixin, OPDS2FeedView): """Start View.""" @override @staticmethod def _update_feed_modified( feed_metadata: Mapping, groups: Iterable[Mapping] ) -> Mapping: max_modified = EPOCH_START for group in groups: for publication in group.get("publications", []): modified = publication.get("metadata", {}).get("modified", EPOCH_START) max_modified = max(max_modified, modified) if max_modified != feed_metadata["modified"]: feed_metadata = dict(feed_metadata) feed_metadata["modified"] = max_modified return feed_metadata @override @extend_schema( parameters=[OPDS2FeedView.input_serializer_class], operation_id="opds_2.0_start_retrieve", ) def get(self, *args, **kwargs) -> Response: return super().get(*args, **kwargs) ================================================ FILE: codex/views/opds/v2/feed/feed_links.py ================================================ """OPDS v2.0 top links section methods.""" from types import MappingProxyType from typing import Any from codex.views.opds.const import MimeType, Rel, TopRoutes from codex.views.opds.v2.const import HrefData, LinkData from codex.views.opds.v2.feed.links import OPDS2LinksView _SEARCH_QUERY_PARAMS = MappingProxyType({"topGroup": "s"}) class OPDS2FeedLinksView(OPDS2LinksView): """OPDS 2.0 top links section methods.""" def _link_auth(self): href_data = HrefData( {}, url_name="opds:auth:v1", ) link_data =
LinkData( Rel.AUTHENTICATION, href_data, mime_type=MimeType.AUTHENTICATION, ) return self.link(link_data) def _link_search(self): href_data = HrefData( TopRoutes.SERIES, query_params=_SEARCH_QUERY_PARAMS, max_page=1, template="{?query}", ) link_data = LinkData(Rel.SEARCH, href_data) return self.link(link_data) def _get_static_links(self): start_href_data = HrefData({}, url_name="opds:v2:start") start_link_data = LinkData(Rel.START, start_href_data) register_href_data = HrefData( {}, url_name="app:start", ) register_link_data = LinkData( Rel.REGISTER, register_href_data, mime_type=MimeType.HTML, ) static_links = [ self._link_auth(), self._link_search(), ] static_links += [ self.link(start_link_data), ] if not self.request.user: static_links += [ self.link(register_link_data), ] return static_links def _top_route(self) -> dict[str, Any]: group = "f" if self.kwargs.get("group") == "f" else "r" return {"group": group, "pks": (0,), "page": 1} def _link_page(self, rel, page): """Links to a page of results.""" kwargs = {**self.kwargs, "page": page} href_data = HrefData(kwargs, inherit_query_params=True) link_data = LinkData(rel, href_data) return self.link(link_data) def get_links(self, up_route): """Get the top links section of the feed.""" pks = self.kwargs.get("pks") page = self.kwargs.get("page", 0) links_data = [ self.link_self(), *self._get_static_links(), ] if pks and 0 not in pks: # no top or up links if we're already at the top top_href_data = HrefData(self._top_route(), inherit_query_params=True) top_link_data = LinkData(Rel.TOP, top_href_data) up_href_data = HrefData(up_route, inherit_query_params=True) up_link_data = LinkData(Rel.UP, up_href_data) links_data += [ self.link(top_link_data), self.link(up_link_data), ] if page != 1: links_data += [ self._link_page("first", 1), ] if page > 1: links_data += [ self._link_page("previous", page - 1), ] if page != self.num_pages: links_data += [ self._link_page("next", page + 1), self._link_page("last", self.num_pages), ] link_dict = {} for link in links_data: self.link_aggregate(link_dict, link) return self.get_links_from_dict(link_dict) ================================================ FILE: codex/views/opds/v2/feed/groups.py ================================================ """OPDS v2.0 Feed Groups.""" from collections.abc import Mapping from typing import Any from codex.models.groups import BrowserGroupModel from codex.models.named import StoryArc from codex.settings import BROWSER_MAX_OBJ_PER_PAGE from codex.views.opds.const import BLANK_TITLE, Rel from codex.views.opds.v2.const import ( FACETS, PREVIEW_GROUPS, START_GROUPS, TOP_GROUPS, HrefData, Link, LinkData, LinkGroup, ) from codex.views.opds.v2.feed.publications import OPDS2PublicationsView class OPDS2FeedGroupsView(OPDS2PublicationsView): """OPDS 2.0 Feed Groups.""" ######### # Links # ######### def _create_link_kwargs( self, link_spec: Link | BrowserGroupModel ) -> dict[str, Any] | dict: """Create link kwargs.""" if isinstance(link_spec, Link): if link_spec.group is None: # Start Link return {} group = link_spec.group or self.kwargs.get("group", "r") pks = (0,) else: group = link_spec.__class__.__name__[0].lower() pks = link_spec.ids # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] return {"group": group, "pks": pks, "page": 1} @staticmethod def _create_link_query_params( link_spec: Link | BrowserGroupModel, kwargs: Mapping ) -> Mapping | None: """Create link query params.""" if not isinstance(link_spec, Link | StoryArc): return {} qps = 
link_spec.query_params if isinstance(link_spec, Link) else {} # Special order by for story_arcs if ( kwargs and kwargs.get("group") == "a" and kwargs.get("pks") and (not qps or not qps.get("orderBy")) ): qps = dict(qps) if qps else {} qps["orderBy"] = "story_arc_number" return qps def _create_links_from_link_spec( self, link_spec: Link | BrowserGroupModel, link_dict: Mapping, ) -> None: if not self.is_allowed(link_spec): return kwargs = self._create_link_kwargs(link_spec) qps = self._create_link_query_params(link_spec, kwargs) title = getattr(link_spec, "title", "") if not title: title = getattr(link_spec, "name", "") if not title: title = BLANK_TITLE if isinstance(link_spec, Link): inherit_query_params = link_spec.inherit_query_params rel = link_spec.rel else: inherit_query_params = True rel = Rel.SUB url_name = "opds:v2:start" if not kwargs else None href_data = HrefData( kwargs, qps, url_name=url_name, inherit_query_params=inherit_query_params ) link_data = LinkData(rel, href_data, title=title) link = self.link(link_data) self.link_aggregate(link_dict, link) ########## # Groups # ########## def _create_group_from_group_spec( self, group_spec: LinkGroup, *, paginate: bool = False ) -> list: groups = [] link_dict = {} for link_spec in group_spec.links: self._create_links_from_link_spec(link_spec, link_dict) links = self.get_links_from_dict(link_dict) if links: metadata: dict[str, str | int] = {"title": group_spec.title} if group_spec.subtitle: metadata["subtitle"] = group_spec.subtitle current_page = self.kwargs.get("page", 1) if paginate: pagination = { "current_page": current_page, "items_per_page": BROWSER_MAX_OBJ_PER_PAGE, "number_of_items": self._opds_number_of_groups, } metadata.update(pagination) group: dict[str, Mapping | list] = { "metadata": metadata, } group["navigation"] = links groups += [group] return groups def _create_group(self, group_specs, *, paginate: bool = False) -> list: """Create links sections for groups and facets.""" groups = [] for group_spec in group_specs: groups += self._create_group_from_group_spec(group_spec, paginate=paginate) return groups def get_top_groups(self): """Top Nav Groups.""" return self._create_group(TOP_GROUPS) def get_ordered_groups(self) -> list: """Ordered Publication Groups.""" groups = [] for group_spec in PREVIEW_GROUPS: # explode into individual groups for link_spec in group_spec.links: pub_section = self.get_publications_preview(link_spec) groups += pub_section return groups def get_start_groups(self): """Start Groups.""" return self._create_group(START_GROUPS) def get_groups(self, group_qs, book_qs, title: str, zero_pad: int): """Regular publication groups.""" groups = [] # Regular Groups tup = (LinkGroup(title, group_qs),) subtitle = group_qs.model.__name__ if group_qs.model else "UnknownGroup" if subtitle != "Series": subtitle += "s" groups += self._create_group(tup, paginate=True) # Publications groups += self.get_publications(book_qs, zero_pad, title, subtitle=subtitle) return groups def get_facets(self): """Facet Groups.""" return self._create_group(FACETS) ================================================ FILE: codex/views/opds/v2/feed/links.py ================================================ """Links methods for OPDS v2.0 Feed.""" import json from copy import deepcopy from datetime import datetime from typing import override from urllib.parse import parse_qsl, urlparse from django.db.models import QuerySet from codex.settings import FALSY from codex.views.opds.const import BookmarkFilters, MimeType, Rel, UserAgentNames from 
codex.views.opds.feed import OPDSBrowserView from codex.views.opds.v2.const import HrefData, LinkData from codex.views.opds.v2.href import OPDS2HrefMixin _BOOKMARK_FILTERS_NONE_STR = json.dumps(dict(BookmarkFilters.NONE)) class OPDS2LinksView(OPDS2HrefMixin, OPDSBrowserView): """Links methods for OPDS 2.0 Feed.""" TARGET = "opds2" def __init__(self, *args, **kwargs) -> None: """Initialize properties.""" super().__init__(*args, **kwargs) self._num_pages: int | None = None self._group_and_books: ( tuple[QuerySet, QuerySet, int, int, int | None, datetime | None] | None ) = None self._user_agent_name: str | None = None @property def group_and_books( self, ) -> tuple[QuerySet, QuerySet, int, int, int | None, datetime | None]: """Memoize Group And Books for num_pages.""" # group_qs, book_qs, num_pages, total_count, zero_pad, mtime if self._group_and_books is None: self._group_and_books = self._get_group_and_books() return self._group_and_books @property @override def num_pages(self) -> int: """Memoize num_pages.""" if self._num_pages is None: self._num_pages = self.group_and_books[2] return self._num_pages @staticmethod def _link_attributes(data, link) -> None: """Add attributes to link.""" if data.title: link["title"] = data.title if data.href_data.template: link["templated"] = True if data.height: link["height"] = data.height if data.width: link["width"] = data.width if data.size: link["size"] = data.size @staticmethod def _link_properties(data, link) -> None: """Add properties attribute to link.""" if data.num_items or data.authenticate: link["properties"] = {} if data.num_items: link["properties"]["number_of_items"] = data.num_items if data.authenticate: link["properties"]["authenticate"] = data.authenticate def link(self, data: LinkData) -> dict | None: """Create a link element.""" if data.href: href = data.href else: href = self.href(data.href_data) if not href: return None if self.user_agent_name in UserAgentNames.REQUIRE_ABSOLUTE_URL: href = self.request.build_absolute_uri(href) mime_type = data.mime_type or MimeType.OPDS_JSON link = {"href": href, "rel": data.rel, "type": mime_type} self._link_attributes(data, link) self._link_properties(data, link) return link @staticmethod def _normalize_query_params(qps_dict) -> frozenset: if qps_dict.get("orderBy") == "sort_name": qps_dict.pop("orderBy", None) if qps_dict.get("orderReverse", "").lower() in FALSY: qps_dict.pop("orderReverse", None) if qps_dict.get("filters", {}) == _BOOKMARK_FILTERS_NONE_STR: qps_dict.pop("filters") return frozenset(qps_dict.items()) def _is_self_link(self, href) -> bool: """Return if the path and query params match the current request.""" req_qps = deepcopy(self.request.GET) req_qps = self._normalize_query_params(req_qps) # This is inefficient since I construct the query_params before this # but it's difficult to get the final query params for all constructions.
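        # For illustration, with hypothetical request values: after
        # _normalize_query_params, "?orderBy=sort_name&orderReverse=false"
        # and an empty query string both reduce to an empty frozenset, so an
        # href that differs from the current request only by default
        # parameters still counts as a self link.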
parts = urlparse(href) href_qps = dict(parse_qsl(parts.query)) href_qps = self._normalize_query_params(href_qps) return self.request.path == parts.path and req_qps == href_qps def link_aggregate(self, link_dict, link) -> None: """Aggregate links into a dict to combine rels into an array.""" if not link: return href = link["href"] rel = link.pop("rel") if href in link_dict: link_dict[href]["rels"].add(rel) else: link_dict[href] = link link_dict[href]["rels"] = {rel} if self._is_self_link(href): link_dict[href]["rels"].add(Rel.SELF) @staticmethod def get_links_from_dict(link_dict) -> list: """Produce the final links list from the aggregate dict.""" final_links = [] for link in link_dict.values(): # rel can be a list or a string. rels = sorted(link.pop("rels")) rel = rels if len(rels) > 1 else rels[0] link["rel"] = rel final_links.append(link) return final_links def link_self(self): """Create the self link for this page.""" href_data = HrefData(self.kwargs, dict(self.request.GET)) link_data = LinkData(Rel.SELF, href_data) return self.link(link_data) ================================================ FILE: codex/views/opds/v2/feed/publications.py ================================================ """Publication Methods for OPDS v2.0 feed.""" from collections.abc import Iterable from datetime import datetime from math import floor from types import MappingProxyType from typing import Final, override from urllib.parse import quote_plus from caseconverter import snakecase from codex.choices.admin import AdminFlagChoices from codex.librarian.covers.create import THUMBNAIL_HEIGHT, THUMBNAIL_WIDTH from codex.models import AdminFlag, Comic from codex.models.groups import BrowserGroupModel, Folder from codex.settings import BROWSER_MAX_OBJ_PER_PAGE from codex.views.opds.const import MimeType, Rel from codex.views.opds.v2.const import HrefData, Link, LinkData from codex.views.opds.v2.feed.feed_links import OPDS2FeedLinksView _PUBLICATION_PREVIEW_LIMIT: Final = 5 _PREVIEW_SHOW_PARAMS: Final[MappingProxyType[str, bool]] = MappingProxyType( {"p": True, "s": True} ) class OPDS2PublicationBaseView(OPDS2FeedLinksView): """Base view for publication entries.""" def __init__(self, *args, **kwargs) -> None: """Initialize vars.""" self._auth_link = None super().__init__(*args, **kwargs) @staticmethod def is_allowed(link_spec: Link | BrowserGroupModel) -> bool: """Return if the link is allowed.""" if ( isinstance(link_spec, Link) and ( link_spec.group == "f" or ( link_spec.query_params and link_spec.query_params.get("topGroup") == "f" ) ) ) or isinstance(link_spec, Folder): # Folder perms efv_flag = ( AdminFlag.objects.only("on") .get(key=AdminFlagChoices.FOLDER_VIEW.value) .on ) if not efv_flag: return False return True def _publication_metadata(self, obj, zero_pad) -> dict: title_filename_fallback = bool(self.admin_flags.get("folder_view")) if self.kwargs.get("group") == "f": title = Comic.get_filename(obj) else: title = Comic.get_title( obj, volume=True, name=False, filename_fallback=title_filename_fallback, zero_pad=zero_pad, ) md = { "type": MimeType.BOOK, "modified": obj.updated_at, "published": obj.date, "title": title, } if subtitle := obj.name: md["subtitle"] = subtitle if page_count := obj.page_count: md["number_of_pages"] = page_count return md @property def auth_link(self): """Create a reusable authentication link dict.""" if self._auth_link is None: auth_href_data = HrefData({}, url_name="opds:auth:v1") auth_link_data = LinkData( Rel.AUTHENTICATION, auth_href_data, mime_type=MimeType.AUTHENTICATION, )
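            # Built once per view instance and memoized: every publication's
            # download, progression, and manifest link below attaches this
            # same dict via the "authenticate" link property.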
self._auth_link = self.link(auth_link_data) return self._auth_link def _publication_link(self, kwargs, url_name, rel, mime_type, size=None): href_data = HrefData(kwargs, url_name=url_name) link_data = LinkData( rel, href_data, mime_type=mime_type, authenticate=self.auth_link, size=size ) return self.link(link_data) def _publication(self, obj, zero_pad) -> dict: pub = {} if not obj: return pub pub["metadata"] = self._publication_metadata(obj, zero_pad) # Acquisition/Download link fn = quote_plus(obj.get_filename()) acq_kwargs = {"pk": obj.pk, "filename": fn} download_mime_type = MimeType.FILE_TYPE_MAP.get(obj.file_type, MimeType.OCTET) acq_link = self._publication_link( acq_kwargs, "opds:bin:download", Rel.ACQUISITION, download_mime_type, size=obj.size, ) # Progression Link prog_kwargs = {"group": "c", "pk": obj.pk} prog_link = self._publication_link( prog_kwargs, "opds:v2:position", Rel.PROGRESSION, MimeType.PROGRESSION ) # Divina Manifest Link manifest_kwargs = {"pks": [obj.pk]} manifest_link = self._publication_link( manifest_kwargs, "opds:v2:manifest", Rel.SELF, MimeType.DIVINA ) links = [ acq_link, prog_link, manifest_link, ] pub["links"] = links return pub def _thumb(self, obj) -> list: images = [] if not obj: return images ts = floor(datetime.timestamp(obj.updated_at)) kwargs = {"group": obj.group, "pks": obj.ids} query_params = { "customCovers": True, "dynamicCovers": False, "ts": ts, } thumb_href_data = HrefData( kwargs, query_params, url_name="opds:bin:cover", ) thumb_link_data = LinkData( Rel.THUMBNAIL, thumb_href_data, mime_type=MimeType.WEBP, height=THUMBNAIL_HEIGHT, width=THUMBNAIL_WIDTH, authenticate=self.auth_link, ) thumb_link = self.link(thumb_link_data) images.append(thumb_link) return images class OPDS2PublicationsView(OPDS2PublicationBaseView): """Publication Methods for OPDS 2.0 feed.""" @override def _publication(self, obj, zero_pad) -> dict: pub = super()._publication(obj, zero_pad) if images := self._thumb(obj): pub["images"] = images return pub def _get_publications_links(self, link_spec) -> list: if not link_spec: return [] kwargs = {"group": link_spec.group, "pks": (0,), "page": 1} href_data = HrefData(kwargs, link_spec.query_params, inherit_query_params=True) # Must be rel="self" for Stump to add View All link_data = LinkData(Rel.SELF, href_data=href_data, title=link_spec.title) return [self.link(link_data)] def _get_publication_section_metadata( self, title: str, subtitle: str, number_of_items: int | None, items_per_page: int, ) -> dict: current_page = self.kwargs.get("page", 1) metadata = { "title": title, "current_page": current_page, "items_per_page": items_per_page, } if subtitle: metadata["subtitle"] = subtitle if number_of_items: metadata["number_of_items"] = self._opds_number_of_books return metadata def get_publications( self, book_qs: Iterable, zero_pad: int, title: str, subtitle: str = "", items_per_page=BROWSER_MAX_OBJ_PER_PAGE, link_spec=None, number_of_items: int | None = None, ) -> list: """Get publications section.""" publications = [] for obj in book_qs: pub = self._publication(obj, zero_pad) publications.append(pub) groups = [] if not publications: return groups metadata = self._get_publication_section_metadata( title, subtitle, number_of_items, items_per_page ) pub_group: dict[str, list | dict] = { "metadata": metadata, } if links := self._get_publications_links(link_spec): pub_group["links"] = links pub_group["publications"] = publications groups.append(pub_group) return groups def _get_publications_preview_feed_view(self, link_spec: Link): 
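        # A rough sketch of what this helper builds, taking the "Keep
        # Reading" link_spec from PREVIEW_GROUPS as the example: a detached
        # OPDS2FeedLinksView whose params are snake_cased from the spec's
        # query params, roughly
        #   {"top_group": "c", "filters": {...UNREAD...},
        #    "order_by": "bookmark_updated_at", "order_reverse": True,
        #    "limit": 5, "show": {..., "p": True, "s": True}}
        # i.e. a small side query independent of the main browser pagination.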
feed_view = OPDS2FeedLinksView() feed_view.request = self.request group = link_spec.group feed_view.kwargs = {"group": group, "pks": [0], "page": 1} params = self.get_browser_default_params() if link_spec.query_params: for key, value in link_spec.query_params.items(): params[snakecase(key)] = value params["show"].update(_PREVIEW_SHOW_PARAMS) params["limit"] = _PUBLICATION_PREVIEW_LIMIT feed_view.set_params(params) return feed_view def get_publications_preview(self, link_spec: Link) -> list: """Get a limited preview of publications outside the main query.""" feed_view = self._get_publications_preview_feed_view(link_spec) book_qs, book_count, zero_pad = feed_view.get_book_qs() if not book_count: return [] return self.get_publications( book_qs, zero_pad, link_spec.title, items_per_page=_PUBLICATION_PREVIEW_LIMIT, link_spec=link_spec, number_of_items=book_count, ) ================================================ FILE: codex/views/opds/v2/href.py ================================================ """Href methods for OPDS v2.0 Feed.""" import json from collections.abc import Mapping from itertools import chain from caseconverter import camelcase from django.urls import reverse from codex.settings import DEBUG from codex.views.opds.const import UserAgentNames from codex.views.util import pop_name class OPDS2HrefMixin: """Create links method.""" @property def num_pages(self) -> int: """Dummy.""" return 1 def _href_page_validate(self, kwargs, data) -> bool: """Validate the page bounds.""" min_page = min(1, 1 if data.min_page is None else data.min_page) max_page = max(1, self.num_pages if data.max_page is None else data.max_page) page = int(kwargs["page"]) return page >= min_page and page <= max_page def _href_update_query_params(self, data) -> dict: """Update the query params.""" # Merge query_params and camelCase keys qps_maps = [] if data.inherit_query_params: qps_maps.append(self.request.GET) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if data.query_params: qps_maps.append(data.query_params) query = {} for key, val in chain(*(d.items() for d in qps_maps)): query[camelcase(key)] = val # Stringify filters value if (filters := query.get("filters")) and isinstance(filters, Mapping): query["filters"] = json.dumps(dict(filters)) return query def href(self, data) -> str | None: """Create an href.""" url_name = data.url_name or "opds:v2:feed" kwargs = data.kwargs if data.kwargs is not None else self.kwargs # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if "page" in kwargs and not self._href_page_validate(kwargs, data): return None kwargs = pop_name(kwargs) query = self._href_update_query_params(data) href = reverse(url_name, kwargs=dict(kwargs), query=query) if DEBUG or self.user_agent_name in UserAgentNames.REQUIRE_ABSOLUTE_URL: # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] href = self.request.build_absolute_uri(href) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if template := data.template: parts = href.split("?") if len(parts) > 1: template += "&" href = template.join(parts) return href ================================================ FILE: codex/views/opds/v2/manifest.py ================================================ """Publication Methods for OPDS v2.0 feed.""" import json from collections.abc import Mapping, Sequence from datetime import datetime from math import floor from types import MappingProxyType from typing import override from django.db.models 
import F, QuerySet from codex.models.base import BaseModel, NamedModel from codex.models.identifier import Identifier from codex.models.named import Credit, StoryArcNumber from codex.serializers.opds.v2.publication import ( OPDS2PublicationDivinaManifestSerializer, ) from codex.views.auth import GroupACLMixin from codex.views.opds.const import AUTHOR_ROLES, BLANK_TITLE, MimeType, Rel from codex.views.opds.metadata import get_credits, get_m2m_objects from codex.views.opds.v2.const import HrefData, LinkData from codex.views.opds.v2.feed.publications import OPDS2PublicationBaseView _MD_CREDIT_MAP = MappingProxyType( # If OPDS2 is ever popular, make this comprehensive by using comicbox role enums { "author": AUTHOR_ROLES, "translator": {"Translator"}, "editor": {"Editor"}, "artist": {"CoverArtist", "Cover", "Artist"}, "illustrator": {"Illustrator"}, "letterer": {"Letterer"}, "penciller": {"Penciller"}, "colorist": {"Colorist", "Colors"}, "inker": {"Inker", "Inks"}, "contributor": {"Contributor"}, "narrator": {"Narrator"}, } ) _PUBLICATION_FIELD_MAP: MappingProxyType[str, str] = MappingProxyType( { "description": "summary", "publisher": "publisher_name", "imprint": "imprint_name", } ) _PUBLICATION_METHOD_KEYS: tuple[str, ...] = ( "identifier", "belongs_to", "subject", ) class OPDS2ManifestMetadataView(OPDS2PublicationBaseView): """Publication Manifest Divina Extended Metadata.""" def _publication_identifier(self, obj) -> str: rel = GroupACLMixin.get_rel_prefix(Identifier) comic_filter = {rel + "in": [obj.pk]} identifiers = ( Identifier.objects.filter(**comic_filter) .annotate(source_name=F("source__name")) .only("id_type", "key") .order_by("source_name", "key") ) urns = [] for identifier in identifiers: urn = f"{identifier.source_name}:{identifier.id_type}:{identifier.key}" # pyright: ignore[reportAttributeAccessIssue] urns.append(urn) return ",".join(urns) def _publication_belongs_to_link( self, kwargs: Mapping[str, str | int | Sequence[int]], query_params: Mapping[str, str | int | Mapping], name: str, number: int | None, ) -> list[dict]: href_data = HrefData( kwargs, query_params, url_name="opds:v2:feed", ) link_data = LinkData(Rel.SUB, href_data, mime_type=MimeType.OPDS_JSON) link = self.link(link_data) belongs_to: dict[str, str | list | int] = {"name": name, "links": [link]} if number: belongs_to["number"] = number return [belongs_to] def _publication_belongs_to_series(self, obj): name = obj.series_name if obj.series.name else BLANK_TITLE pks: list[int] = [obj.series.pk] kwargs: Mapping[str, str | Sequence[int] | int] = { "group": "s", "pks": pks, "page": 1, } number = obj.issue_number ts = floor(datetime.timestamp(obj.updated_at)) query_params = { "ts": ts, "topGroup": "p", } return self._publication_belongs_to_link(kwargs, query_params, name, number) def _publication_belongs_to_folder(self, obj) -> list: if not self.is_allowed(obj): return [] folder = obj.parent_folder name = folder.path pks = [folder.pk] kwargs = {"group": "f", "pks": pks, "page": 1} number = None ts = floor(datetime.timestamp(obj.updated_at)) query_params = {"ts": ts, "topGroup": "f"} return self._publication_belongs_to_link(kwargs, query_params, name, number) def _publication_belongs_to_story_arcs(self, obj) -> list: story_arcs = [] rel = GroupACLMixin.get_rel_prefix(StoryArcNumber) comic_filter = {rel + "in": [obj.pk]} story_arc_numbers = ( StoryArcNumber.objects.filter(**comic_filter) .only("story_arc", "number") .order_by("story_arc__name") ) for story_arc_number in story_arc_numbers: story_arc = 
story_arc_number.story_arc name = story_arc.name or BLANK_TITLE pks = [story_arc.pk] number = story_arc_number.number kwargs = {"group": "a", "pks": pks, "page": 1} ts = floor(datetime.timestamp(obj.updated_at)) query_params = {"ts": ts, "topGroup": "a"} story_arc = self._publication_belongs_to_link( kwargs, query_params, name, number ) story_arcs += story_arc return story_arcs def _publication_belongs_to(self, obj) -> dict: belongs_to = {} if series := self._publication_belongs_to_series(obj): belongs_to["series"] = series if folder := self._publication_belongs_to_folder(obj): belongs_to["collection"] = folder if story_arcs := self._publication_belongs_to_story_arcs(obj): belongs_to["storyArc"] = story_arcs return belongs_to def _add_tag_link(self, obj: BaseModel, filter_key: str, subfield: str = ""): kwargs = {"group": "s", "pks": (), "page": 1} value: NamedModel = getattr(obj, subfield) if subfield else obj # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] filters = {filter_key: [value.pk]} filters = json.dumps(filters) query_params = { "topGroup": "s", "filters": filters, "title": value.name, } href_data = HrefData(kwargs, query_params, url_name="opds:v2:feed") link_data = LinkData( Rel.FACET, href_data, mime_type=MimeType.OPDS_JSON, ) link = self.link(link_data) obj.links = (link,) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] def _publication_subject(self, obj) -> tuple[NamedModel, ...]: m2m_objs = get_m2m_objects(obj.ids) flat_subjs = [] for key, subjs in m2m_objs.items(): filter_key = key + "s" for subj in subjs: self._add_tag_link(subj, filter_key) flat_subjs.append(subj) return tuple(flat_subjs) def _add_credits(self, pks, roles) -> QuerySet[Credit] | None: """Add credits to metadata.""" if credit_objs := get_credits(pks, roles, exclude=False): for credit_obj in credit_objs: self._add_tag_link(credit_obj, "credits", "person") return credit_objs return None def _publication_credits(self, obj) -> Mapping[str, tuple[Credit, ...]]: credit_md = {} for key, roles in _MD_CREDIT_MAP.items(): if credit_objs := self._add_credits(obj.ids, roles): credit_md[key] = credit_objs return credit_md @override def _publication_metadata(self, obj, zero_pad) -> dict: md = super()._publication_metadata(obj, zero_pad) # Direct attribute to key mappings for md_key, attr in _PUBLICATION_FIELD_MAP.items(): if value := getattr(obj, attr, None): md[md_key] = value # Special cases with transforms if lang := obj.language: md["language"] = lang.name if layout := obj.reading_direction: md["layout"] = "scrolled" if layout == "ttb" else layout # Method-based keys for key in _PUBLICATION_METHOD_KEYS: if value := getattr(self, f"_publication_{key}")(obj): md[key] = value if credit_md := self._publication_credits(obj): md.update(credit_md) return md class OPDS2ManifestView(OPDS2ManifestMetadataView): """Single publication manifest view.""" serializer_class = OPDS2PublicationDivinaManifestSerializer def _publication_reading_order(self, obj) -> list: """ Reader manifest for OPDS 2.0. This part of the spec is redundant, but required. """ reading_order = [] if not obj: return reading_order ts = floor(datetime.timestamp(obj.updated_at)) query_params = {"ts": ts} for page_num in range(obj.page_count): kwargs = {"pk": obj.pk, "page": page_num} href_data = HrefData( kwargs, query_params, url_name="opds:bin:page", min_page=0, max_page=obj.page_count, ) href = self.href(href_data) page = { "href": href, # type is required, but not calculated for efficiency. 
"type": MimeType.JPEG, # height and width not pre-calculated and fortunately not required by Stump } reading_order.append(page) return reading_order def _cover(self, obj) -> list: images = [] if not obj: return images ts = floor(datetime.timestamp(obj.updated_at)) pk = obj.ids[0] kwargs = {"pk": pk, "page": 0} query_params = {"ts": ts, "bookmark": False, "pixmap": True} image_href_data = HrefData( kwargs, query_params, url_name="opds:bin:page", min_page=0, ) image_link_data = LinkData( Rel.IMAGE, image_href_data, mime_type=MimeType.JPEG, # Include dummy heights just to pass client validation height=0, width=0, authenticate=self.auth_link, ) image_link = self.link(image_link_data) images.append(image_link) return images @override def _publication(self, obj, zero_pad) -> dict: pub = super()._publication(obj, zero_pad) # DiViNa manifest uses resources instead of images if resources := self._cover(obj): pub["resources"] = resources if reading_order := self._publication_reading_order(obj): pub["reading_order"] = reading_order return pub @override def get_object(self) -> MappingProxyType: """Get one publication object.""" book_qs, _, zero_pad = self.get_book_qs() obj = book_qs.first() return MappingProxyType(self._publication(obj, zero_pad)) ================================================ FILE: codex/views/opds/v2/progression.py ================================================ """OPDS 2 Progression view.""" # https://github.com/opds-community/drafts/discussions/67#discussioncomment-6414507 from http import HTTPStatus from types import MappingProxyType from typing import TYPE_CHECKING, Any, override from dateparser import parse from django.db.models import QuerySet from django.db.models.expressions import F, Value from django.db.models.fields import FloatField from django.db.models.functions.comparison import Cast, Coalesce, Greatest, Least from django.db.models.query_utils import FilteredRelation from loguru import logger from rest_framework.exceptions import ValidationError from rest_framework.parsers import JSONParser from rest_framework.renderers import JSONRenderer from rest_framework.response import Response from codex.models.comic import Comic from codex.serializers.opds.v2.progression import OPDS2ProgressionSerializer from codex.util import max_none from codex.views.auth import AuthFilterGenericAPIView from codex.views.bookmark import BookmarkFilterMixin, BookmarkPageMixin from codex.views.const import GROUP_MODEL_MAP from codex.views.exceptions import NoContent from codex.views.opds.auth import OPDSAuthMixin from codex.views.opds.v2.const import HrefData from codex.views.opds.v2.href import OPDS2HrefMixin if TYPE_CHECKING: from codex.models.groups import BrowserGroupModel _EMPTY_DEVICE = MappingProxyType( { "id": "", "name": "", } ) READIUM_PROGRESSION_MIME_TYPE = "application/vnd.readium.progression+json" class ReadiumProgressionParser(JSONParser): """Parses Readium Progression as JSON.""" media_type = READIUM_PROGRESSION_MIME_TYPE class ReadiumProgressionAPIRenderer(JSONRenderer): """Renders Readium Progression as a JSON.""" media_type = READIUM_PROGRESSION_MIME_TYPE # This is an independent api requiring a separate get. 
class OPDS2ProgressionView( OPDSAuthMixin, OPDS2HrefMixin, BookmarkPageMixin, BookmarkFilterMixin, AuthFilterGenericAPIView, ): """OPDS 2 Progression view.""" parser_classes = ( ReadiumProgressionParser, *AuthFilterGenericAPIView.parser_classes, ) renderer_classes = ( ReadiumProgressionAPIRenderer, *AuthFilterGenericAPIView.renderer_classes, ) serializer_class = OPDS2ProgressionSerializer content_type = ReadiumProgressionParser.media_type def __init__(self, *args, **kwargs) -> None: """Initialize Bookmark Filter.""" self.init_bookmark_filter() super().__init__(*args, **kwargs) self._obj: BrowserGroupModel = Comic() self._user_agent_name: str | None = None # pyright: ignore[reportIncompatibleUnannotatedOverride] @property def modified(self): """Get modified from bookmark.""" return self._obj.bookmark_updated_at # pyright: ignore[reportAttributeAccessIssue], #ty: ignore[unresolved-attribute] @property def device(self): """Codex doesn't record device for progression.""" return _EMPTY_DEVICE @property def title(self) -> str: """The locator title is the page number.""" return f"Page {self._obj.page}" # pyright: ignore[reportAttributeAccessIssue], #ty: ignore[unresolved-attribute] @property def _progression_href(self): """Build a Progression HRef.""" acq_kwargs = { "pk": self._obj.pk, "page": self._obj.page, # pyright: ignore[reportAttributeAccessIssue], #ty: ignore[unresolved-attribute] } max_page = max_none(self._obj.page_count - 1, 0) # pyright: ignore[reportAttributeAccessIssue], #ty: ignore[unresolved-attribute] data = HrefData( acq_kwargs, url_name="opds:bin:page", min_page=0, max_page=max_page ) return self.href(data) @property def _locations(self) -> dict[str, Any]: """Build the Locations object.""" # The OPDS v2 progression spec specifies position as > 0.
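        # (so a zero-based bookmark page of 0 is reported as position 1)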
position = max(self._obj.page + 1, 0) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] return { "position": position, "progression": self._obj.progress, # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] "total_progression": self._obj.progress, # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] } @property def locator(self) -> dict[str, Any]: """Build the Locator object.""" return { "title": self.title, # See publication.py:103 "href": self._progression_href, "type": "image/jpeg", "locations": self._locations, } def _get_bookmark_query(self) -> QuerySet: group = self.kwargs.get("group") pk = self.kwargs.get("pk") if not (group and pk): reason = f"Bad primary key for {group}:{pk}" raise ValidationError(reason, code="422") model = GROUP_MODEL_MAP.get(group) if not model: reason = f"No model found for group {group}" raise ValidationError(reason, code="422") acl_filter = self.get_group_acl_filter(model, self.request.user) qs = model.objects.filter(acl_filter).distinct() bm_rel = self.get_bm_rel(model) bm_filter = self.get_my_bookmark_filter(bm_rel) return qs.annotate( my_bookmark=FilteredRelation("bookmark", condition=bm_filter), bookmark_updated_at=F("my_bookmark__updated_at"), ) @override def get_object(self) -> dict[str, Any]: """Build the progression data object.""" pk = self.kwargs.get("pk") qs = self._get_bookmark_query() progress = Least( F("page") / Greatest(Cast(F("page_count"), FloatField()), 1.0), Value(1.0) ) qs = ( qs.annotate( page=Coalesce(F("my_bookmark__page"), 0), progress=progress, ) .only("page_count") .distinct() ) self._obj = qs.get(pk=pk) if not self._obj.bookmark_updated_at: # pyright: ignore[reportAttributeAccessIssue] raise NoContent return { "modified": self.modified, "device": self.device, "locator": self.locator, } def get(self, *args, **kwargs) -> Response: """Get Response.""" try: obj = self.get_object() serializer = self.get_serializer(obj) return Response(serializer.data) except Comic.DoesNotExist: return Response(status=HTTPStatus.NOT_FOUND) except NoContent: return Response(status=HTTPStatus.NO_CONTENT) except Exception as exc: logger.error("Error in OPDS v2 progression API") logger.exception(exc) raise def put(self, *_args, **_kwargs) -> Response: """Update the bookmark.""" data = self.request.data serializer = self.get_serializer(data=data, partial=True) serializer.is_valid(raise_exception=True) data = serializer.validated_data conflict = False status_code = HTTPStatus.BAD_REQUEST if new_modified_str := data.get("modified"): new_modified = parse(new_modified_str) qs = self._get_bookmark_query() comic = qs.first() conflict = comic and comic.bookmark_updated_at > new_modified # Update anyway on missing modified. Liberal acceptance, not according to spec. if conflict: status_code = HTTPStatus.CONFLICT else: position: int | None = ( data.get("locator", {}).get("locations", {}).get("position") ) if position is not None: # The OPDS v2 progression spec specifies position as > 0.
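                # Convert the one-based spec position back to a zero-based
                # page, clamped at 0.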
                page = max(position - 1, 0)
                self.kwargs["page"] = page
                self.update_bookmark()
                status_code = HTTPStatus.OK
        return Response(status=status_code)


================================================
FILE: codex/views/public.py
================================================
"""Public non-authenticated views."""

from typing import override

from rest_framework.generics import GenericAPIView
from rest_framework.mixins import RetrieveModelMixin
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.choices.admin import AdminFlagChoices
from codex.models import AdminFlag
from codex.serializers.auth import AuthAdminFlagsSerializer

_ADMIN_FLAG_KEYS = (
    AdminFlagChoices.BANNER_TEXT.value,
    AdminFlagChoices.LAZY_IMPORT_METADATA.value,
    AdminFlagChoices.NON_USERS.value,
    AdminFlagChoices.REGISTRATION.value,
)


class AdminFlagsView(GenericAPIView, RetrieveModelMixin):
    """Get admin flags relevant to auth."""

    serializer_class: type[BaseSerializer] | None = AuthAdminFlagsSerializer
    queryset = AdminFlag.objects.filter(key__in=_ADMIN_FLAG_KEYS).only(
        "key", "on", "value"
    )

    @override
    def get_object(self) -> dict:
        """Get admin flags."""
        flags = {}
        for obj in self.get_queryset():
            name = AdminFlagChoices(obj.key).name.lower()
            val = obj.value if obj.key == AdminFlagChoices.BANNER_TEXT.value else obj.on
            flags[name] = val
        return flags

    def get(self, *args, **kwargs) -> Response:
        """Get admin flags relevant to auth."""
        return self.retrieve(*args, **kwargs)


================================================
FILE: codex/views/pwa.py
================================================
"""PWA views."""

from codex.views.template import CodexTemplateView


class WebManifestView(CodexTemplateView):
    """Serve the webmanifest spec."""

    template_name = "pwa/manifest.webmanifest"
    content_type = "application/manifest+json"


class ServiceWorkerRegisterView(CodexTemplateView):
    """Serve the serviceworker register javascript."""

    template_name = "pwa/serviceworker-register.js"
    content_type = "application/javascript"


class ServiceWorkerView(CodexTemplateView):
    """Serve the serviceworker javascript."""

    template_name = "pwa/serviceworker.js"
    content_type = "application/javascript"


================================================
FILE: codex/views/reader/__init__.py
================================================
"""Reader views."""


================================================
FILE: codex/views/reader/arcs.py
================================================
"""Reader get Arcs methods."""

from datetime import UTC, datetime
from typing import TYPE_CHECKING

from codex.choices.admin import AdminFlagChoices
from codex.models import AdminFlag
from codex.models.comic import Comic
from codex.models.functions import JsonGroupArray
from codex.models.named import StoryArc
from codex.util import max_none
from codex.views.const import (
    EPOCH_START,
    STORY_ARC_GROUP,
)
from codex.views.reader.params import ReaderParamsView

if TYPE_CHECKING:
    from collections.abc import Mapping

_COMIC_ARC_FIELD_NAMES = ("series", "volume", "parent_folder")
_STORY_ARC_ONLY = ("name", "ids", "updated_ats")
_UPDATED_ATS_DATE_FORMAT_STR = "%Y-%m-%d %H:%M:%S.%f"


class ReaderArcsView(ReaderParamsView):
    """Reader get Arcs methods."""

    def _get_field_names(self) -> tuple:
        field_names = []
        for field_name in _COMIC_ARC_FIELD_NAMES:
            if field_name == "parent_folder":
                efv_flag = (
                    AdminFlag.objects.only("on")
                    .get(key=AdminFlagChoices.FOLDER_VIEW.value)
                    .on
                )
                if not efv_flag:
                    continue
            else:
                show: Mapping = self.get_from_settings(  # pyright: ignore[reportAssignmentType]
                    "show", browser=True
                )
                group = field_name[0]
                if not show.get(group):
                    continue
            field_names.append(field_name)
        return tuple(field_names)

    @staticmethod
    def _get_group_arc(
        comic: Comic,
        field_name: str,
        arcs: dict,
        max_mtime: int | None,
    ):
        """Append the series, volume, or folder arc from the comic's own FKs."""
        group = getattr(comic, field_name)
        arc_ids = (group.pk,)
        mtime = group.updated_at
        max_mtime = max_none(max_mtime, mtime)
        group_letter = "f" if field_name == "parent_folder" else field_name[0]
        arcs[group_letter] = {arc_ids: {"name": group.name, "mtime": mtime}}
        return max_mtime

    def _get_story_arcs(self, comic: Comic, arcs, max_mtime: int | None):
        """Append the story arcs."""
        qs = StoryArc.objects.filter(storyarcnumber__comic__pk=comic.pk)
        if not qs.exists():
            return max_mtime
        qs = qs.group_by("sort_name")  # pyright: ignore[reportAttributeAccessIssue]
        qs = qs.annotate(
            ids=JsonGroupArray("id", distinct=True, order_by="id"),
            updated_ats=JsonGroupArray(
                "updated_at", distinct=True, order_by="updated_at"
            ),
        )
        qs = qs.order_by("sort_name").only("name")
        arcs[STORY_ARC_GROUP] = {}
        for sa in qs:
            arc = {"name": sa.name}
            ids = tuple(sorted(set(sa.ids)))
            updated_ats = (
                datetime.strptime(ua, _UPDATED_ATS_DATE_FORMAT_STR).replace(tzinfo=UTC)
                for ua in sa.updated_ats
            )
            mtime = max_none(EPOCH_START, *updated_ats)
            arc["mtime"] = mtime
            max_mtime = max_none(max_mtime, mtime)
            arcs[STORY_ARC_GROUP][ids] = arc
        return max_mtime

    def _set_selected_arc(self, arcs) -> None:
        arc = self.params["arc"]
        arc_group = arc["group"]
        requested_arc_ids = arc.get("ids", ())
        arc_id_infos = arcs.get(arc_group)
        all_arc_ids: frozenset[tuple[int, ...]] = (
            frozenset(arc_id_infos.keys()) if arc_id_infos else frozenset()
        )
        arc_ids = ()
        if arc_group == STORY_ARC_GROUP:
            if requested_arc_ids in all_arc_ids:
                arc_ids = requested_arc_ids
            else:
                for arc_ids in all_arc_ids:
                    # requested_arc_ids is a tuple, so convert it for set math.
                    if frozenset(requested_arc_ids).intersection(arc_ids):
                        break
        if not arc_ids:
            arc_ids = next(iter(all_arc_ids))
        self._selected_arc_group = arc_group
        self._selected_arc_ids = arc_ids

    def get_arcs(self) -> tuple[dict, int | None]:
        """Get all series/folder/story arcs."""
        field_names = self._get_field_names()
        comic_pk = self.kwargs.get("pk")
        comic = (
            Comic.objects.select_related(*field_names)
            .only(*field_names)
            .get(pk=comic_pk)
        )
        arcs = {}
        max_mtime = None
        for field_name in field_names:
            max_mtime = self._get_group_arc(comic, field_name, arcs, max_mtime)
        max_mtime = self._get_story_arcs(comic, arcs, max_mtime)
        self._set_selected_arc(arcs)
        return arcs, max_mtime


================================================
FILE: codex/views/reader/books.py
================================================
"""Get Books methods."""

from django.db.models import F
from django.db.models.query import Q, QuerySet
from django.urls import reverse
from rest_framework.exceptions import NotFound

from codex.models import Comic
from codex.models.bookmark import Bookmark
from codex.models.settings import SettingsReader
from codex.serializers.redirect import ReaderRedirectSerializer
from codex.views.bookmark import BookmarkAuthMixin
from codex.views.browser.filters.field import ComicFieldFilterView
from codex.views.const import (
    FOLDER_GROUP,
    GROUP_RELATION,
    NONE_INTEGERFIELD,
    STORY_ARC_GROUP,
)
from codex.views.mixins import SharedAnnotationsMixin
from codex.views.reader.arcs import ReaderArcsView

_COMIC_FIELDS = (
    "file_type",
    "issue_number",
    "issue_suffix",
    "page_count",
    "series",
    "volume",
    "reading_direction",
    "updated_at",
)
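# Tie-break ordering for issues within an arc: issue number first, then
# suffix (so e.g. issue 5 sorts before 5AU), then the comic's sort name.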
_ISSUE_ORDERING = ( "issue_number", "issue_suffix", "sort_name", ) class ReaderBooksView(ReaderArcsView, SharedAnnotationsMixin, BookmarkAuthMixin): """Get Books methods.""" def _get_reader_settings_auth_filter(self) -> dict: """Get the auth filter for SettingsReader queries.""" auth_filter = self.get_bookmark_auth_filter() # Add client type for SettingsReader. return {**auth_filter, "client": "api"} def _append_with_settings(self, book): """Append per-comic reader settings and bookmark to book.""" reader_auth = self._get_reader_settings_auth_filter() book.settings = SettingsReader.objects.filter(**reader_auth, comic=book).first() bookmark_auth = self.get_bookmark_auth_filter() book.bookmark = ( Bookmark.objects.filter(**bookmark_auth, comic=book) .only("page", "finished") .first() ) return book def _raise_not_found(self) -> None: """Raise not found exception.""" pk = self.kwargs.get("pk") detail = { "route": reverse("app:start"), "reason": f"comic {pk} not found", "serializer": ReaderRedirectSerializer, } raise NotFound(detail=detail) def _get_comics_filter(self, rel): """Build the filter.""" group_acl_filter = self.get_group_acl_filter(Comic, self.request.user) nav_filter = {f"{rel}__in": self._selected_arc_ids} query_filter = group_acl_filter & Q(**nav_filter) if browser_filters := self.get_from_settings("filters", browser=True): # no search at this time. query_filter &= ComicFieldFilterView.get_all_comic_field_filters( "", browser_filters ) return query_filter def _get_comics_annotation_and_ordering( self, model, ordering ) -> tuple[dict, tuple]: """Get ordering for query.""" sort_name_annotations = {} if self._selected_arc_group in "sv": parent_group = "i" if self._selected_arc_group == "s" else "s" show = self.get_from_settings("show", browser=True) sort_name_annotations = self.get_sort_name_annotations( model, parent_group, self._selected_arc_ids, show ) if sort_name_annotations and model is Comic: ordering += (*sort_name_annotations.keys(),) ordering += _ISSUE_ORDERING return sort_name_annotations, tuple(ordering) def _get_comics_list(self) -> QuerySet: """Get the reader navigation group filter.""" rel = GROUP_RELATION[self._selected_arc_group] fields = _COMIC_FIELDS arc_pk_rel = rel + "__pk" arc_index = NONE_INTEGERFIELD select_related = () prefetch_related = () ordering = () if self._selected_arc_group == STORY_ARC_GROUP: arc_index = F("story_arc_numbers__number") prefetch_related = (*prefetch_related, rel) ordering = ("arc_index", "date", "pk") elif self._selected_arc_group == FOLDER_GROUP: fields = (*_COMIC_FIELDS, rel) select_related = (rel,) ordering = ("path", "pk") query_filter = self._get_comics_filter(rel) qs = Comic.objects.filter(query_filter) if prefetch_related: qs = qs.prefetch_related(*prefetch_related) if select_related: qs = qs.select_related(*select_related) qs = qs.only(*fields) qs = self.annotate_group_names(qs) qs = qs.annotate( volume_number_to=(F("volume__number_to")), issue_count=F("volume__issue_count"), arc_pk=F(arc_pk_rel), arc_index=arc_index, mtime=F("updated_at"), has_metadata=F("metadata_mtime"), ) sort_names_alias, ordering = self._get_comics_annotation_and_ordering( qs.model, ordering ) if sort_names_alias: qs = qs.alias(**sort_names_alias) return qs.order_by(*ordering) def get_book_collection(self) -> dict: """ Get the -1, +1 window around the current issue. Uses iteration in python. There are some complicated ways of doing this with __gt[0] & __lt[0] in the db, but I think they might be even more expensive. 
Yields 1 to 3 books """ comics = self._get_comics_list() books = {} prev_book = None pk = self.kwargs.get("pk") for index, book in enumerate(comics): if books: # after match set next comic and break books["next"] = self._append_with_settings(book) break if book.pk == pk: # first match. set previous and current comic if prev_book: books["prev"] = self._append_with_settings(prev_book) # create extra current book attrs: book.filename = book.get_filename() self._selected_arc_index = index + 1 self._selected_arc_count = comics.count() books["current"] = self._append_with_settings(book) else: # Haven't matched yet, so set the previous comic prev_book = book if not books.get("current"): self._raise_not_found() return books ================================================ FILE: codex/views/reader/page.py ================================================ """Views for reading comic books.""" from comicbox.box import Comicbox from django.http import HttpResponse from drf_spectacular.types import OpenApiTypes from drf_spectacular.utils import OpenApiParameter, extend_schema from loguru import logger from pdffile import PageFormat from rest_framework.exceptions import NotFound from codex.librarian.bookmark.tasks import BookmarkUpdateTask from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.models.choices import FileTypeChoices from codex.models.comic import Comic from codex.settings import COMICBOX_CONFIG, FALSY from codex.views.auth import AuthFilterAPIView from codex.views.bookmark import BookmarkAuthMixin _PDF_MIME_TYPE = "application/pdf" _PDF_FORMAT_NON_PDF_TYPES = frozenset( {e.value for e in (PageFormat.PIXMAP, PageFormat.IMAGE)} ) class ReaderPageView(BookmarkAuthMixin, AuthFilterAPIView): """Display a comic page from the archive itself.""" X_MOZ_PRE_HEADERS = frozenset({"prefetch", "preload", "prerender", "subresource"}) content_type = "image/jpeg" def _update_bookmark(self) -> None: """Update the bookmark if the bookmark param was passed.""" do_bookmark = bool( self.request.GET.get("bookmark") and self.request.headers.get("X-moz") not in self.X_MOZ_PRE_HEADERS ) if not do_bookmark: return auth_filter = self.get_bookmark_auth_filter() comic_pks = (self.kwargs.get("pk"),) page = self.kwargs.get("page") updates = {"page": page} task = BookmarkUpdateTask(auth_filter, comic_pks, updates) LIBRARIAN_QUEUE.put(task) def _get_page_image(self) -> tuple: """Get the image data and content type.""" # Get comic - Distinct is important group_acl_filter = self.get_group_acl_filter(Comic, self.request.user) qs = Comic.objects.filter(group_acl_filter).only("path", "file_type").distinct() pk = self.kwargs.get("pk") comic = qs.get(pk=pk) # page_image page = self.kwargs.get("page") pdf_format = ( PageFormat.PIXMAP.value if self.request.GET.get("pixmap", "").lower() not in FALSY else "" ) with Comicbox(comic.path, config=COMICBOX_CONFIG, logger=logger) as cb: page_image = cb.get_page_by_index(page, pdf_format=pdf_format) if not page_image: page_image = b"" # content type if ( comic.file_type == FileTypeChoices.PDF.value # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] and pdf_format not in _PDF_FORMAT_NON_PDF_TYPES ): content_type = _PDF_MIME_TYPE else: content_type = self.content_type return page_image, content_type @extend_schema( parameters=[ OpenApiParameter("bookmark", OpenApiTypes.BOOL, default=True), OpenApiParameter("pixmap", OpenApiTypes.BOOL, default=False), ], responses={ (200, content_type): OpenApiTypes.BINARY, (200, _PDF_MIME_TYPE): OpenApiTypes.BINARY, }, ) 
def get(self, *_args, **_kwargs) -> HttpResponse: """Get the comic page from the archive.""" try: page_image, content_type = self._get_page_image() self._update_bookmark() except Comic.DoesNotExist as exc: pk = self.kwargs.get("pk") detail = f"comic {pk} not found in db." raise NotFound(detail=detail) from exc except FileNotFoundError as exc: pk = self.kwargs.get("pk") detail = f"comic path for {pk} not found: {exc}." raise NotFound(detail=detail) from exc except Exception as exc: logger.warning(exc) raise NotFound(detail="comic page not found") from exc else: return HttpResponse(page_image, content_type=content_type) ================================================ FILE: codex/views/reader/params.py ================================================ """Views for reading comic books.""" from types import MappingProxyType from typing import Any from loguru import logger from codex.serializers.fields.reader import VALID_ARC_GROUPS from codex.serializers.reader import ReaderViewInputSerializer from codex.views.reader.settings import ReaderSettingsBaseView class ReaderParamsView(ReaderSettingsBaseView): """Reader initialization.""" input_serializer_class = ReaderViewInputSerializer def __init__(self, *args, **kwargs) -> None: """Initialize instance vars.""" super().__init__(*args, **kwargs) self._group_pks: dict[str, tuple[int, ...]] = {} self._params: MappingProxyType[str, Any] | None = None def _ensure_arc_group(self, params: dict[str, Any]) -> None: arc = params.get("arc", {}) group = arc.get("group", "") if not group: top_group: str = self.get_from_settings( # pyright: ignore[reportAssignmentType] "top_group", browser=True ) group = "s" if top_group in "rpi" else top_group if group not in VALID_ARC_GROUPS: group = "s" params["arc"]["group"] = group @staticmethod def _ensure_arc_ids(params: dict[str, Any]) -> None: arc = params.get("arc", {}) if ids := arc.get("ids"): ids = tuple(sorted(set(filter(lambda x: x > 0, ids)))) else: ids = () params["arc"]["ids"] = ids def _ensure_arc(self, params: dict[str, Any]) -> None: """Ensure the group is valid.""" if "arc" not in params: params["arc"] = {} self._ensure_arc_group(params) self._ensure_arc_ids(params) @property def params(self): """Memoized params property.""" if self._params is None: try: serializer = self.input_serializer_class(data=self.request.GET) serializer.is_valid(raise_exception=True) params = self.load_params_from_settings() if serializer.validated_data: params.update(serializer.validated_data) self._ensure_arc(params) self.save_params_to_settings(params) self._params = MappingProxyType(params) except Exception: logger.exception("validate") raise return self._params ================================================ FILE: codex/views/reader/reader.py ================================================ """Views for reading comic books.""" from collections.abc import Mapping from typing import override from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.serializers import BaseSerializer from codex.serializers.reader import ReaderComicsSerializer, ReaderViewInputSerializer from codex.views.reader.books import ReaderBooksView class ReaderView(ReaderBooksView): """Get info for displaying comic pages.""" serializer_class: type[BaseSerializer] | None = ReaderComicsSerializer TARGET: str = "reader" @override def get_object(self) -> dict[str, Mapping | int | None]: """Get the previous and next comics in a group or story arc.""" # get_arcs & get_book_collection populates those arc self 
variables. # So order is important.
        arcs, mtime = self.get_arcs()
        books = self.get_book_collection()
        arc = {
            "group": self._selected_arc_group,
            "ids": self._selected_arc_ids,
            "index": self._selected_arc_index,
            "count": self._selected_arc_count,
        }
        close_route = self.get_last_route()
        return {
            "arc": arc,
            "arcs": arcs,
            "books": books,
            "close_route": close_route,
            "mtime": mtime,
        }

    @extend_schema(parameters=[ReaderViewInputSerializer])
    def get(self, *args, **kwargs) -> Response:
        """Get the book info."""
        obj = self.get_object()
        serializer = self.get_serializer(obj)
        return Response(serializer.data)


================================================
FILE: codex/views/reader/settings.py
================================================
"""Reader settings views."""

from types import MappingProxyType
from typing import TYPE_CHECKING

from drf_spectacular.utils import extend_schema
from loguru import logger
from rest_framework.exceptions import ValidationError
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.models import Comic, Folder, Series
from codex.models.named import StoryArc
from codex.models.settings import ClientChoices, SettingsReader
from codex.serializers.reader import (
    ReaderScopedUpdateSerializer,
    ReaderSettingsSerializer,
)
from codex.views.settings import NULL_VALUES, SettingsBaseView

if TYPE_CHECKING:
    from rest_framework.request import Request

# scope letter → (SettingsReader FK field, Comic FK for auto-resolve, Model for name)
# "g" = global (no FK). "c" = comic.
# p/i/s/v all resolve to series scope. "f" = folder. "a" = story_arc.
#
_GLOBAL_SCOPE = "g"
_COMIC_SCOPE = "c"
_SCOPE_MAP = MappingProxyType(
    {
        _GLOBAL_SCOPE: (None, None, None),
        _COMIC_SCOPE: ("comic_id", None, None),
        "p": ("series_id", "series_id", Series),
        "i": ("series_id", "series_id", Series),
        "s": ("series_id", "series_id", Series),
        "v": ("series_id", "series_id", Series),
        "f": ("folder_id", "parent_folder_id", Folder),
        "a": ("story_arc_id", None, StoryArc),
    }
)


class ReaderSettingsBaseView(SettingsBaseView):
    """Reader settings — model config, defaults, reset, and scope lookups."""

    MODEL = SettingsReader
    CLIENT = ClientChoices.API
    FILTER_ARGS = MappingProxyType(
        {
            "comic__isnull": True,
            "series__isnull": True,
            "folder__isnull": True,
            "story_arc__isnull": True,
        }
    )
    CREATE_ARGS = MappingProxyType(
        {"comic": None, "series": None, "folder": None, "story_arc": None}
    )

    # ── Defaults & reset ──────────────────────────────────────────────

    @classmethod
    def get_reader_default_params(cls) -> dict:
        """Derive reader default params from model field metadata."""
        return {
            key: cls._get_field_default(SettingsReader, key)
            for key in SettingsReader.DIRECT_KEYS
        }

    @classmethod
    def reset_reader_settings(cls, instance: SettingsReader) -> dict:
        """Reset reader DIRECT_KEYS to model defaults and return the params dict."""
        defaults = cls.get_reader_default_params()
        for key in SettingsReader.DIRECT_KEYS:
            setattr(instance, key, defaults[key])
        instance.save()
        return defaults

    # ── Auth + scope lookups ──────────────────────────────────────────
    # (inlined from the former _ReaderSettingsAuthMixin / BookmarkAuthMixin)

    def _get_bookmark_auth_filter(self) -> dict[str, int | str | None]:
        """Filter only the current user's settings rows."""
        if TYPE_CHECKING:
            self.request: Request
        if self.request.user.is_authenticated:
            return {"user_id": self.request.user.pk}
        if not self.request.session or not self.request.session.session_key:
            logger.debug("no session, make one")
            self.request.session.save()
        return {"session_id": self.request.session.session_key}
{"session_id": self.request.session.session_key} def _get_settings_lookup(self, **extra): """Build the base lookup for a SettingsReader query.""" auth_filter = self._get_bookmark_auth_filter() return {"client": ClientChoices.API, **auth_filter, **extra} @staticmethod def _instance_to_dict(instance: SettingsReader | None) -> dict | None: """Convert a SettingsReader to a settings dict, or None.""" if instance is None: return None return {key: getattr(instance, key) for key in instance.DIRECT_KEYS} def _get_global_settings(self) -> SettingsReader: """Get or create the global reader settings row.""" base_lookup = self._get_settings_lookup() filter_kwargs = { **base_lookup, **self.FILTER_ARGS, } instance = SettingsReader.objects.filter(**filter_kwargs).first() if instance is not None: return instance return SettingsReader.objects.create(**base_lookup, **self.CREATE_ARGS) def _get_scoped_settings(self, scope_fk_field: str, scope_pk: int): """Get a scoped SettingsReader row or None.""" lookup = self._get_settings_lookup(**{scope_fk_field: scope_pk}) return SettingsReader.objects.filter(**lookup).first() def _get_or_create_scoped_settings( self, scope_fk_field: str, scope_pk: int ) -> SettingsReader: """Get or create a scoped SettingsReader row.""" lookup = self._get_settings_lookup(**{scope_fk_field: scope_pk}) instance = SettingsReader.objects.filter(**lookup).first() if instance is not None: return instance return SettingsReader.objects.create(**lookup) class ReaderSettingsView(ReaderSettingsBaseView): """ Consolidated reader settings endpoint. Mounted at both ``c/settings`` (no comic context) and ``c//settings`` (comic context available). GET — request one or more scopes via ``?scopes=g,s,c``. When *pk* is in the URL the view can auto-resolve intermediate scope pks from the comic. ``?story_arc_pk=`` is required for the *a* scope. PATCH — send ``scope`` (g/c/s/f/a) and, for every scope except *g*, ``scope_pk`` together with the settings fields to update. 
""" serializer_class: type[BaseSerializer] | None = ReaderScopedUpdateSerializer # ── GET helpers ───────────────────────────────────────────────── def _resolve_scope_pk( self, scope: str, comic_fk: str | None, comic: Comic | None, ) -> int | None: """Return the scope pk for an intermediate scope, or None.""" if scope == "a": raw = self.request.GET.get("story_arc_pk") return int(raw) if raw else None if comic and comic_fk: return getattr(comic, comic_fk, None) return None @staticmethod def _canonical_scope(scope: str) -> str: """Normalise arc-group aliases (p/i/v) to their canonical scope letter.""" if scope in "piv": return "s" return scope def _get_scope(self, scope, scopes_out, comic, scope_info) -> None: config = _SCOPE_MAP.get(scope) if config is None: return fk_field, comic_fk, model = config canon = self._canonical_scope(scope) if scope == _GLOBAL_SCOPE: instance = self._get_global_settings() scopes_out[canon] = self._instance_to_dict(instance) elif scope == _COMIC_SCOPE: if comic: instance = self._get_scoped_settings("comic_id", comic.pk) scopes_out[canon] = self._instance_to_dict(instance) else: scope_pk = self._resolve_scope_pk(scope, comic_fk, comic) if scope_pk and fk_field: instance = self._get_scoped_settings(fk_field, scope_pk) scopes_out[canon] = self._instance_to_dict(instance) name = ( ( model.objects.filter(pk=scope_pk) .values_list("name", flat=True) .first() or "" ) if model else "" ) scope_info[canon] = {"pk": scope_pk, "name": name} # ── HTTP methods ──────────────────────────────────────────────── @extend_schema(responses=None) def get(self, *args, **kwargs) -> Response: """Return settings for one or more scopes.""" scopes_str = self.request.GET.get("scopes", _GLOBAL_SCOPE) requested = scopes_str.split(",") comic_pk: int | None = self.kwargs.get("pk") # Pre-fetch comic once if any non-g/c scope needs it. comic: Comic | None = None needed_comic_fks = set() for scope in requested: config = _SCOPE_MAP.get(scope) if config and config[1]: needed_comic_fks.add(config[1]) if needed_comic_fks and comic_pk: comic = Comic.objects.only(*needed_comic_fks).get(pk=comic_pk) scopes_out: dict = {} scope_info: dict = {} for scope in requested: self._get_scope(scope, scopes_out, comic, scope_info) data = {"scopes": scopes_out, "scope_info": scope_info} return Response(data) @extend_schema( request=ReaderScopedUpdateSerializer, responses=ReaderSettingsSerializer, ) def patch(self, *args, **kwargs) -> Response: """Update settings for a single scope.""" serializer = self.get_serializer(data=self.request.data) serializer.is_valid(raise_exception=True) data = serializer.validated_data scope = data.pop("scope") scope_pk = data.pop("scope_pk", None) config = _SCOPE_MAP.get(scope) if config is None: raise ValidationError({"scope": f"Invalid scope: {scope}"}) if scope == _GLOBAL_SCOPE: # Reject null/blank updates for global to preserve defaults. 
data = {k: v for k, v in data.items() if v not in NULL_VALUES} instance = self._get_global_settings() else: if not scope_pk: raise ValidationError( {"scope_pk": "scope_pk is required for non-global scopes."} ) fk_field = config[0] instance = self._get_or_create_scoped_settings(fk_field, scope_pk) # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type] for key, value in data.items(): if key in instance.DIRECT_KEYS: setattr(instance, key, value) instance.save() result = self._instance_to_dict(instance) output_serializer = ReaderSettingsSerializer(result) return Response(output_serializer.data) @extend_schema( request=ReaderScopedUpdateSerializer, responses=ReaderSettingsSerializer, ) def delete(self, *args, **kwargs) -> Response: """Reset settings for a single scope to model defaults.""" serializer = self.get_serializer(data=self.request.data) serializer.is_valid(raise_exception=True) data = serializer.validated_data scope = data.get("scope", _GLOBAL_SCOPE) scope_pk = data.get("scope_pk") config = _SCOPE_MAP.get(scope) if config is None: raise ValidationError({"scope": f"Invalid scope: {scope}"}) if scope == _GLOBAL_SCOPE: # Global scope: reset to model defaults (can't delete). instance = self._get_global_settings() result = self.reset_reader_settings(instance) else: if not scope_pk: raise ValidationError( {"scope_pk": "scope_pk is required for non-global scopes."} ) fk_field = config[0] # Delete the scoped row entirely — absence means "inherit". lookup = self._get_settings_lookup(**{fk_field: scope_pk}) # pyright: ignore[reportCallIssue], # ty: ignore[invalid-argument-type] SettingsReader.objects.filter(**lookup).delete() result = None output_serializer = ReaderSettingsSerializer(result) return Response(output_serializer.data) ================================================ FILE: codex/views/settings.py ================================================ """Base class for settings.""" from abc import ABC from collections.abc import Mapping, Sequence from copy import deepcopy from types import MappingProxyType from django.contrib.auth.models import AbstractBaseUser, AnonymousUser from loguru import logger from codex.choices.browser import DEFAULT_BROWSER_ROUTE from codex.models.settings import ( ClientChoices, SettingsBase, SettingsBrowser, SettingsBrowserFilters, SettingsBrowserLastRoute, SettingsBrowserShow, SettingsReader, ) from codex.views.auth import AuthFilterGenericAPIView from codex.views.const import FOLDER_GROUP, STORY_ARC_GROUP CREDIT_PERSON_UI_FIELD = "credits" STORY_ARC_UI_FIELD = "story_arcs" IDENTIFIER_TYPE_UI_FIELD = "identifier_source" SETTINGS_BROWSER_SELECT_RELATED = ("show", "filters", "last_route") BROWSER_FILTER_ARGS = MappingProxyType({"name": ""}) BROWSER_CREATE_ARGS = MappingProxyType({"name": ""}) SHOW_KEYS = frozenset({"p", "i", "s", "v"}) NULL_VALUES: frozenset = frozenset({"", None}) class SettingsBaseView(AuthFilterGenericAPIView, ABC): """ Core settings model access with full read/write support. Provides the low-level read/write interface to the settings database models. Subclasses must set MODEL, CLIENT, FILTER_ARGS, and CREATE_ARGS. """ # Must override these in concrete subclasses. MODEL: type[SettingsReader | SettingsBrowser] CLIENT: ClientChoices FILTER_ARGS: Mapping = MappingProxyType({}) CREATE_ARGS: Mapping = MappingProxyType({}) # Browser settings config for cross-reading (e.g. get_last_route from # a reader view). Defaults to API browser settings; OPDS views # override BROWSER_CLIENT. 
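    # e.g. get_last_route() below always reads from BROWSER_MODEL /
    # BROWSER_CLIENT, so a reader view can recover the browser's last route
    # without owning any browser settings of its own.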
BROWSER_MODEL: type[SettingsBrowser] = SettingsBrowser BROWSER_CLIENT: ClientChoices = ClientChoices.API # ── Session / user helpers ────────────────────────────────────── def _ensure_session_key(self) -> str | None: """Ensure the Django session is saved and return its key.""" if not self.request.session.session_key: self.request.session.save() return self.request.session.session_key def _get_request_user(self): """Return the authenticated user or None.""" user = self.request.user if user and getattr(user, "pk", None): return user return None # ── Model field introspection ─────────────────────────────────── @staticmethod def _get_field_default(model, field_name): """Get the default value for a model field, calling it if callable.""" default = model._meta.get_field(field_name).default return default() if callable(default) else default # ── Settings row CRUD ─────────────────────────────────────────── @staticmethod def _get_or_create_settings_user( model: type[SettingsReader | SettingsBrowser], user: AbstractBaseUser | AnonymousUser, session_key: str | None, base_filter: Mapping, only: Sequence[str] | None, ) -> SettingsBrowser | SettingsReader | None: instance = model.objects.filter(user=user, **base_filter) if model is SettingsBrowser: instance = instance.select_related(*SETTINGS_BROWSER_SELECT_RELATED) if only: instance = instance.only(*only) instance = instance.first() if instance is None: return None if session_key and instance.session_id != session_key: # pyright: ignore[reportAttributeAccessIssue] # Discard any anonymous row that owns the new session so # the unique constraint isn't violated. model.objects.filter( session_id=session_key, user__isnull=True, **base_filter ).delete() instance.session_id = session_key # pyright: ignore[reportAttributeAccessIssue] instance.save(update_fields=("session_id", "updated_at")) return instance @staticmethod def _get_or_create_settings_session( model: type[SettingsReader | SettingsBrowser], user: AbstractBaseUser | AnonymousUser | None, session_key: str, base_filter: Mapping, only: Sequence[str] | None, ): instance = model.objects.filter(session_id=session_key, **base_filter) if model is SettingsBrowser: instance = instance.select_related(*SETTINGS_BROWSER_SELECT_RELATED) elif only: instance = instance.only(*only) instance = instance.first() if instance is None: return None if user and instance.user_id is None: # Promote anonymous row to a user row (first login). instance.user = user instance.save(update_fields=("user_id", "updated_at")) return instance @staticmethod def _create_browser_settings(user, session_key, client, create_args): """Create a SettingsBrowser with its related show/filters/last_route.""" show, _ = SettingsBrowserShow.objects.get_or_create( p=True, i=False, s=True, v=False, ) instance = SettingsBrowser.objects.create( user=user, session_id=session_key, client=client, show=show, **create_args, ) SettingsBrowserFilters.objects.create(browser=instance) SettingsBrowserLastRoute.objects.create(browser=instance) # Re-fetch with select_related so the reverse OneToOne accessors work. return SettingsBrowser.objects.select_related( *SETTINGS_BROWSER_SELECT_RELATED ).get(pk=instance.pk) def _get_or_create_settings( self, model: type[SettingsReader | SettingsBrowser], client: ClientChoices, filter_args: Mapping, create_args: Mapping, only: Sequence[str] | None = None, ) -> SettingsBase: """ Look up (or create) the settings row for the current user / session. Priority: authenticated user row > session row > create new. 
Handles login transitions (promoting anonymous session rows to user rows) and keeps the session FK current. """ user = self._get_request_user() session_key = self._ensure_session_key() base_filter = {"client": client, **filter_args} # 1. Authenticated user — look up by user first. if user and ( instance := self._get_or_create_settings_user( model, user, session_key, base_filter, only ) ): return instance # 2. Try by session. if session_key and ( instance := self._get_or_create_settings_session( model, user, session_key, base_filter, only ) ): return instance # 3. Nothing found — create. if model is SettingsBrowser: return self._create_browser_settings( user, session_key, client, create_args, ) return model.objects.create( user=user, session_id=session_key, client=client, **create_args, ) # ── Instance → dict conversion ────────────────────────────────── @staticmethod def browser_instance_to_dict(instance: SettingsBrowser) -> dict: """ Convert a SettingsBrowser instance to the params dict. The serializer handles the q↔search translation on output; here we build the dict that the rest of the view layer consumes. """ result: dict = {} for key in instance.DIRECT_KEYS: result[key] = getattr(instance, key) # Show — from the related SettingsBrowserShow row. show_obj = instance.show result["show"] = {k: getattr(show_obj, k) for k in SHOW_KEYS} # Filters — from the related SettingsBrowserFilters row. filters_obj = instance.filters # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] result["filters"] = { k: getattr(filters_obj, k) for k in SettingsBrowserFilters.FILTER_KEYS } # Last route — from the related SettingsBrowserLastRoute row. route_obj = instance.last_route # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] result["last_route"] = { "group": route_obj.group, "pks": tuple(route_obj.pks) if route_obj.pks else (0,), "page": route_obj.page, } return result @staticmethod def _reader_instance_to_dict(instance: SettingsReader) -> dict: """Convert a SettingsReader instance to the params dict.""" return {key: getattr(instance, key) for key in instance.DIRECT_KEYS} # ── Load (read) ───────────────────────────────────────────────── def _load_settings_data(self, only: Sequence[str] | None = None) -> dict: """Load the settings dict from the view's own model.""" instance = self._get_or_create_settings( self.MODEL, self.CLIENT, self.FILTER_ARGS, self.CREATE_ARGS, only=only, ) if isinstance(instance, SettingsBrowser): return self.browser_instance_to_dict(instance) return self._reader_instance_to_dict(instance) # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type] def _load_browser_settings_data(self, only: Sequence[str] | None = None) -> dict: """Load settings from the browser model (for cross-reading).""" instance: SettingsBrowser = self._get_or_create_settings( # pyright: ignore[reportAssignmentType], # ty: ignore[invalid-assignment] self.BROWSER_MODEL, self.BROWSER_CLIENT, BROWSER_FILTER_ARGS, BROWSER_CREATE_ARGS, only=only, ) return self.browser_instance_to_dict(instance) def get_from_settings(self, key: str, default=None, *, browser: bool = False): """Get one key from the session or its default.""" if browser: stored = self._load_browser_settings_data() model = self.BROWSER_MODEL else: stored = self._load_settings_data() model = self.MODEL if stored: return stored.get(key, default) field = model._meta.get_field(key) return field.default def get_last_route(self) -> Mapping: """Get the last route from the browser 
session.""" if last_route := self.get_from_settings("last_route", browser=True): return last_route return DEFAULT_BROWSER_ROUTE @classmethod def get_browser_default_params(cls) -> dict: """Derive browser default params from model field metadata.""" result: dict = {} for key in SettingsBrowser.DIRECT_KEYS: result[key] = cls._get_field_default(SettingsBrowser, key) result["show"] = { k: cls._get_field_default(SettingsBrowserShow, k) for k in SHOW_KEYS } result["filters"] = { k: cls._get_field_default(SettingsBrowserFilters, k) for k in SettingsBrowserFilters.FILTER_KEYS } last_route_keys = ("group", "pks", "page") result["last_route"] = { k: cls._get_field_default(SettingsBrowserLastRoute, k) for k in last_route_keys } return result def load_params_from_settings(self, only: Sequence[str] | None = None) -> dict: """Get session settings with defaults.""" try: return self._load_settings_data(only=only) except Exception: logger.exception("Loading settings data from model") raise # ── Save (write) ──────────────────────────────────────────────── def _get_browser_order_defaults(self) -> dict: if group := self.kwargs.get("group"): # order_by has a dynamic group based default order_by = ( "filename" if group == FOLDER_GROUP else "story_arc_number" if group == STORY_ARC_GROUP else "sort_name" ) order_defaults = {"order_by": order_by} else: order_defaults = {} return order_defaults @staticmethod def _save_browser_show(instance: SettingsBrowser, show_data: dict) -> None: """Get-or-create a shared SettingsBrowserShow row and assign it.""" show_kwargs = {k: bool(show_data.get(k, False)) for k in SHOW_KEYS} show, _ = SettingsBrowserShow.objects.get_or_create(**show_kwargs) # pyright: ignore[reportArgumentType] instance.show = show @staticmethod def _save_browser_filters( filters_obj: SettingsBrowserFilters, filters_data: dict, ) -> None: """Apply filter values from the params dict to the filters row.""" for key, value in filters_data.items(): if key not in SettingsBrowserFilters.FILTER_KEYS: continue cleaned = value if key == "bookmark" else (list(value) if value else []) setattr(filters_obj, key, cleaned) filters_obj.save() @staticmethod def _save_browser_last_route( route_obj: SettingsBrowserLastRoute, route_data: dict, ) -> None: """Apply last-route values from the params dict to the route row.""" if "group" in route_data: route_obj.group = route_data["group"] if "pks" in route_data: pks = route_data["pks"] route_obj.pks = tuple(pks) if pks else (0,) if "page" in route_data: route_obj.page = route_data["page"] route_obj.save() @classmethod def _save_browser_settings_data(cls, instance: SettingsBrowser, data: dict) -> None: """Persist a params dict to a SettingsBrowser and its related rows.""" for key in instance.DIRECT_KEYS: if key in data: setattr(instance, key, data[key]) if "q" in data: instance.search = data["q"] if show_data := data.get("show"): cls._save_browser_show(instance, show_data) instance.save() if filters_data := data.get("filters"): cls._save_browser_filters(instance.filters, filters_data) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] if route_data := data.get("last_route"): cls._save_browser_last_route(instance.last_route, route_data) # pyright: ignore[reportAttributeAccessIssue], # ty: ignore[unresolved-attribute] @staticmethod def _save_reader_settings_data(instance: SettingsReader, data: dict) -> None: """Persist a params dict to a SettingsReader row.""" for key in instance.DIRECT_KEYS: if key in data: setattr(instance, key, data[key]) 
        instance.save()

    def _save_settings_data(self, data: dict) -> None:
        """Persist the settings dict to the view's own model."""
        instance = self._get_or_create_settings(
            self.MODEL,
            self.CLIENT,
            self.FILTER_ARGS,
            self.CREATE_ARGS,
        )
        if isinstance(instance, SettingsBrowser):
            self._save_browser_settings_data(instance, data)
        else:
            self._save_reader_settings_data(instance, data)  # pyright: ignore[reportArgumentType], # ty: ignore[invalid-argument-type]

    def save_params_to_settings(self, params) -> None:  # reader session & browser final
        """Save the session from params with defaults for missing values."""
        try:
            # Deepcopy this so serializing the values later for http response doesn't alter them
            data = deepcopy(dict(params))
            self._save_settings_data(data)
        except Exception as exc:
            logger.warning(f"Saving params to session: {exc}")


================================================
FILE: codex/views/template.py
================================================
"""Generic Codex Template View."""

from collections.abc import Sequence

from rest_framework import status
from rest_framework.renderers import BaseRenderer, TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView


class TemplateXMLRenderer(TemplateHTMLRenderer):
    """Template renderer for xml."""

    media_type = "text/xml"
    format = "xml"


class CodexAPIView(APIView):
    """APIView with a simple getter and no data."""

    content_type = "application/json"
    status_code = status.HTTP_200_OK

    def get(self, *args, **kwargs) -> Response:
        """Render the template with correct content_type."""
        return Response(
            data={}, status=self.status_code, content_type=self.content_type
        )


class CodexTemplateView(CodexAPIView):
    """HTML Template View."""

    renderer_classes: Sequence[type[BaseRenderer]] = (TemplateHTMLRenderer,)
    content_type = "text/html"


class CodexXMLTemplateMixin:
    """XML Template View."""

    renderer_classes: Sequence[type[BaseRenderer]] = (TemplateXMLRenderer,)
    content_type = "application/xml"


================================================
FILE: codex/views/timezone.py
================================================
"""Set timezone from browser endpoint."""

from drf_spectacular.utils import extend_schema
from loguru import logger
from rest_framework.response import Response
from rest_framework.serializers import BaseSerializer

from codex.serializers.auth import TimezoneSerializer
from codex.serializers.mixins import OKSerializer
from codex.views.auth import AuthGenericAPIView


class TimezoneView(AuthGenericAPIView):
    """Set the user timezone."""

    input_serializer_class = TimezoneSerializer
    serializer_class: type[BaseSerializer] | None = OKSerializer

    def _save_timezone(self, django_timezone) -> None:
        """Save django timezone in session."""
        if not django_timezone:
            return
        session = self.request.session
        session["django_timezone"] = django_timezone
        session.save()

    @extend_schema(request=input_serializer_class)
    def put(self, *args, **kwargs) -> Response:
        """Save the browser timezone for the current session."""
        data = self.request.data
        serializer = self.input_serializer_class(data=data)
        serializer.is_valid(raise_exception=True)
        try:
            timezone = serializer.validated_data.get("timezone")
            self._save_timezone(timezone)
        except Exception as exc:
            reason = f"update user timezone {exc}"
            logger.warning(reason)
        serializer = self.get_serializer()
        return Response(serializer.data)


================================================
FILE: codex/views/util.py
================================================
"""Utility classes used by many views."""

from collections.abc import Mapping
from dataclasses import dataclass
from typing import override


@dataclass
class Route:
    """Breadcrumb, like a route."""

    group: str
    pks: tuple[int, ...]
    page: int = 1
    name: str = ""

    @override
    def __hash__(self) -> int:
        """Breadcrumb hash."""
        pk_parts = tuple(sorted(set(self.pks)))
        parts = (self.group, pk_parts, self.page)
        return hash(parts)

    @override
    def __eq__(self, cmp) -> bool:
        """Breadcrumb equality."""
        return cmp and hash(self) == hash(cmp)


def pop_name(kwargs: Mapping) -> Mapping:
    """Pop name from a mapping route."""
    kwargs = dict(kwargs)
    kwargs.pop("name", None)
    return kwargs


================================================
FILE: codex/views/version.py
================================================
"""Version View."""

from typing import override

from rest_framework.response import Response

from codex.librarian.bookmark.tasks import CodexLatestVersionTask
from codex.librarian.mp_queue import LIBRARIAN_QUEUE
from codex.models import Timestamp
from codex.serializers.versions import VersionsSerializer
from codex.settings import DOCKER_IMAGE_DEPRECATED
from codex.version import VERSION
from codex.views.auth import AuthGenericAPIView


class VersionView(AuthGenericAPIView):
    """Return Codex Versions."""

    serializer_class = VersionsSerializer

    @override
    def get_object(self) -> dict[str, str]:
        """Get the versions."""
        ts = Timestamp.objects.get(key=Timestamp.Choices.CODEX_VERSION.value)
        if ts.version:
            latest_version = ts.version
        else:
            LIBRARIAN_QUEUE.put(CodexLatestVersionTask())
            latest_version = "fetching..."
        return {
            "installed": VERSION,
            "latest": latest_version,
            "warning": DOCKER_IMAGE_DEPRECATED,
        }

    def get(self, *args, **kwargs) -> Response:
        """Get Versions."""
        obj = self.get_object()
        serializer = self.get_serializer(obj)
        return Response(serializer.data)


================================================
FILE: codex/websockets/README.md
================================================
# Why Channels?

I use only a fraction of django channels' functionality, but
`channels.auth.AuthMiddlewareStack` is nice.


================================================
FILE: codex/websockets/__init__.py
================================================
"""Django Channels for Codex."""


================================================
FILE: codex/websockets/consumers.py
================================================
"""Notifier ChannelGroups Consumer."""

from enum import Enum
from typing import override

from channels.generic.websocket import AsyncWebsocketConsumer
from loguru import logger

ChannelGroups = Enum("ChannelGroups", "ALL ADMIN")


class NotifierConsumer(AsyncWebsocketConsumer):
    """Base Notifier Consumer."""

    def _get_groups(self) -> list[str]:
        """Dynamic groups by user type."""
        groups = [ChannelGroups.ALL.name]
        user = self.scope.get("user") if self.scope else None
        user_channel = None
        if user:
            user_channel = f"user_{user.id}"
        else:
            session = self.scope.get("session")
            if session:
                user_channel = f"user_{session.session_key}"
        if user_channel:
            groups += [user_channel]
        if user and user.is_staff:
            groups += [ChannelGroups.ADMIN.name]
        return groups

    @override
    async def websocket_connect(self, message) -> None:
        """Authorize with user and connect websocket to groups."""
        # Set groups for user.
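        # e.g. an anonymous session subscribes to ["ALL", "user_<session_key>"],
        # while an authenticated staff user gets ["ALL", "user_<id>", "ADMIN"].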
        self.groups = self._get_groups()
        await super().websocket_connect(message)
        logger.trace(f"Websocket connected to {self.groups}")

    @override
    async def disconnect(self, code) -> None:
        """Close channels after WebSocket disconnect."""
        await self.close(code)

    async def send_text(self, event) -> None:
        """Send message to client."""
        text = event.get("text")
        if not text:
            logger.warning(f"No text in websockets message: {event}")
            return
        await self.send(text)


================================================
FILE: codex/websockets/listener.py
================================================
"""Listens to the Broadcast Queue and sends its messages to channels."""

import asyncio
from queue import Empty
from types import MappingProxyType

from channels.exceptions import InvalidChannelLayerError
from channels.layers import get_channel_layer

from codex.websockets.consumers import ChannelGroups

WS_NORMAL_CLOSURE = 1000


class BroadcastListener:
    """Listens to the Broadcast Queue and sends its messages to channels."""

    _WS_DISCONNECT_EVENT = MappingProxyType(
        {
            "group": ChannelGroups.ALL.name,
            "message": {
                "type": "websocket_disconnect",
                "code": WS_NORMAL_CLOSURE,
            },
        }
    )

    def __init__(self, logger_, queue) -> None:
        """Initialize."""
        self.log = logger_
        self.queue = queue
        self.channel_layer = get_channel_layer()

    async def broadcast_group(self, event) -> None:
        """Broadcast message to a group of channels."""
        if not self.channel_layer:
            reason = "No channel layer found"
            raise InvalidChannelLayerError(reason)
        group = event["group"]
        message = event["message"]
        await self.channel_layer.group_send(group, message)

    async def shutdown(self) -> None:
        """Broadcast disconnect message and close the queue."""
        await self.broadcast_group(self._WS_DISCONNECT_EVENT)
        self.log.debug("Sent disconnect to all channels.")
        try:
            while not self.queue.empty():
                self.queue.get_nowait()
        except Empty:
            pass
        self.queue.close()
        self.queue.join_thread()

    async def listen(self) -> None:
        """Listen to the broadcast queue until a shutdown message."""
        self.log.success(f"{self.__class__.__name__} started.")
        while True:
            try:
                event = await asyncio.to_thread(self.queue.get)
                if event is None:
                    break
                await self.broadcast_group(event)
            except Exception as exc:
                self.log.warning(f"{self.__class__.__name__} listen: {exc}")
        await self.shutdown()
        self.log.info(f"{self.__class__.__name__} shutdown")


================================================
FILE: codex/websockets/mp_queue.py
================================================
"""Global queue to send async queue messages to consumers from other processes."""

from multiprocessing import Queue

BROADCAST_QUEUE = Queue()


================================================
FILE: compose.yaml
================================================
services:
  ci:
    build:
      context: .
      target: codex-ci
    container_name: codex-ci
    entrypoint:
      - sleep
      - infinity
    environment:
      - PYTHONUNBUFFERED=1
    image: codex-ci:ci
    volumes:
      - ./test-results:/app/test-results
      - ./dist:/app/dist
  codex:
    build:
      context: .
dockerfile: ci/Dockerfile image: docker.io/ajslater/codex container_name: codex ports: - 9810:9810 volumes: - ./config:/config - ./comics:/comics:ro codex-backend-test: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION container_name: codex-backend-test volumes: - ./codex/static_build:/app/codex/static_build:ro - ./codex/static:/app/codex/static command: make collectstatic django-check test-python -o build-frontend codex-build-dist: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION container_name: codex-build-dist volumes: - ./codex/static:/app/codex/static:ro - ./dist/:/app/dist/ command: make build-only codex-dev: build: context: . dockerfile: ci/dev.Dockerfile image: codex-dev container_name: codex-dev environment: - VITE_HOST=localhost ports: - 9810:9810 volumes: - .:/app - ./comics:/comics command: tail -f /dev/null codex-dist-builder: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION codex-frontend-build: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION container_name: codex-frontend-build volumes: - ./frontend/src/choices:/app/frontend/src/choices:ro - ./codex/static_build:/app/codex/static_build command: make build-frontend -o build-choices codex-frontend-test: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION container_name: codex-frontend-test command: make test-frontend volumes: - ./frontend/src/choices:/app/frontend/src/choices - ./test-results/:/app/test-results/ codex-lint: image: docker.io/ajslater/codex-dist-builder:$CODEX_DIST_BUILDER_VERSION container_name: codex-lint command: make lint codex-package: build: context: . dockerfile: ci/package.Dockerfile image: codex-package container_name: codex-package env_file: - .env.package ports: - 9810:9810 volumes: - ./dist:/dist - ./comics:/comics - ./config:/config nginx: image: lscr.io/linuxserver/nginx container_name: nginx restart: unless-stopped ports: - 80:80 volumes: - ./nginx/default.conf:/config/nginx/site-confs/default.conf:ro ================================================ FILE: docs/DOCKER.md ================================================ # 🐳 Codex Docker Image Codex is a web server comic book browser and reader. ## Documentation, Source, and Issue Reports - [Codex Documentation](https://codex-comic-reader.readthedocs.io/) - [Codex Docker Images](https://github.com/ajslater/codex/pkgs/container/codex) - [Codex Source](https://github.com/ajslater/codex) - [Codex Issues](https://github.com/ajslater/codex/issues) ## Usage Here are some example snippets to help you get started creating a container from this image. 
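To start a container in one step rather than creating it first, the same flags
work with `docker run` (a sketch mirroring the `docker create` example below;
adjust paths and IDs for your system):

```sh
docker run -d \
  --name=codex \
  -p 9810:9810 \
  -e PUID=501 \
  -e PGID=20 \
  -v /host/path/to/config:/config \
  -v /host/path/to/comics:/comics \
  --restart unless-stopped \
  ghcr.io/ajslater/codex
```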
### docker

```sh
docker create \
  --name=codex \
  -p 9810:9810 \
  -e PUID=501 \
  -e PGID=20 \
  -v /host/path/to/config:/config \
  -v /host/path/to/comics:/comics \
  --restart unless-stopped \
  ghcr.io/ajslater/codex
```

### compose.yaml

```yaml
services:
  codex:
    image: ghcr.io/ajslater/codex
    container_name: codex
    env_file: .env
    volumes:
      - /host/path/to/config:/config
      - /host/path/to/comics:/comics:ro
    ports:
      - "9810:9810"
    restart: on-failure
    healthcheck:
      test: ["CMD", "curl", "--fail", "http://localhost:9810/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 15s
```

Special volume setup for a CIFS share:

```yaml
services:
  my-service:
    volumes:
      - nas-share:/container-path

volumes:
  nas-share:
    driver_opts:
      type: cifs
      o: "username=[username],password=[password]"
      device: //my-nas-network-name/share
```

### Environment Variables Unique to Docker

- `PUID`: Sets the UID for the default user on startup
- `PGID`: Sets the GID for the default user on startup

### General Codex Environment Variables

Refer to the
[Environment Variable Docs](https://codex-comic-reader.readthedocs.io/#environment-variables)
for codex environment variables.

### Support Info

Shell access whilst the container is running:

```sh
docker exec -it codex /bin/bash
```

Monitor the logs of the container in realtime:

```sh
docker logs -f codex
```

Container version number:

```sh
docker inspect -f '{{ index .Config.Labels "org.opencontainers.image.version" }}' codex
```

Image version number:

```sh
docker inspect -f '{{ index .Config.Labels "org.opencontainers.image.version" }}' ajslater/codex
```

## Docker Image

[This Document](https://codex-comic-reader.readthedocs.io/DOCKER/)


================================================
FILE: docs/WINDOWS.md
================================================
# 🪟 Codex Native Windows Installation

Windows users are encouraged to use Docker to run Codex, but it will also run
natively on the Windows Subsystem for Linux.

## Install or Upgrade WSL

[Use Microsoft's instructions to install the WSL](https://learn.microsoft.com/en-us/windows/wsl/install).

If you have previously installed WSL 1, you will have the best luck
[upgrading it to WSL 2](https://learn.microsoft.com/en-us/windows/wsl/install#upgrade-version-from-wsl-1-to-wsl-2)
and using the most recently supported Ubuntu version.

## Install Codex Dependencies

Ensure python3 & pip3 are installed in the WSL:

```sh
apt install python3-pip
```

### Ubuntu Linux Dependencies

The WSL, by default, is an Ubuntu Linux distribution, which is a variety of
Debian Linux. Open a shell in the WSL and use the Debian Linux dependency
instructions reproduced below:

```sh
apt install build-essential libimagequant0 libjpeg-turbo8 libopenjp2-7 libssl libyaml-0-2 libtiff6 libwebp7 python3-dev python3-pip mupdf unrar zlib1g
```

Versions of packages like libjpeg, libssl, libtiff may differ between flavors
and versions of your distribution. If the package versions listed in the
example above are not available, try searching for ones that are with
`apt-cache` or `aptitude` if it is installed.
```sh
apt-cache search libjpeg-turbo
```

## Install Codex

### Install Codex with pip for the whole system

When you have installed the dependencies for your platform, you may install
Codex with pip:

```sh
pip3 install codex --break-system-packages
```

### Install Codex with pip in a python virtual environment

Alternatively, if you would rather not risk overriding system packages in the
WSL, you may create a
[python virtual environment](https://docs.python.org/3/library/venv.html) that
will be separate from the system.

In the following example `.venv` is the name of the virtual environment, a
directory where python will place an entire python environment separate from
the system python environment. You can name this directory anything and place
it anywhere you like. This directory name traditionally leads with a dot so it
becomes a hidden directory, but that is not required.

```sh
sudo apt update
sudo apt install libpython3-dev
sudo apt install python3-venv
mkdir codex
cd codex
python -m venv .venv
```

Now you must activate the virtual environment:

```sh
source .venv/bin/activate
```

Once you have activated the virtual environment you may install Codex and its
python dependencies in the virtual environment.

```sh
pip3 install codex
```

To run Codex you must have this virtual environment activated. So if you
create a new shell to start Codex in the future, you must source the activate
script as above before running Codex.

The codex script may also be installed to `$HOME/.local/bin`, which is not
usually on the executable search path. To add this directory to the path:

```sh
export PATH=$PATH:$HOME/.local/bin
```

You will probably want to add this line to your `$HOME/.bashrc` or
`$HOME/.profile` file to execute it every time you start a Linux shell.

## Mounting Network Drives on WSL

If your comics are on another machine, mounting network drives with the Samba 3
driver may avoid problems that can occur if you mount drives with the DrvFs or
CIFS drivers.

To mount a drive from a server named `server` to the /mnt/comics directory once
for this session:

```sh
sudo mount -t smb3 //server/comics /mnt/comics -o vers=3.1.1,defaults,username='comics',password='password'
```

To mount the drive every time WSL starts up edit the `/etc/fstab` file with a
line similar to:

```sh
# file system     dir          type  options                                         dump  pass
//server/comics   /mnt/comics  smb3  vers=3.1.1,username='comics',password='comics'  0     0
```

### Illegal Characters in Samba Network Drives

Network filesystems may contain characters that are illegal under Windows such
as `\ / : * ? " < > |` or special unicode or other character encodings. The
Samba driver will mangle these for presentation, often substituting a `?`
character for the illegal character.

The simplest solution for these files is to rename the files. But it may
[also be possible](https://serverfault.com/questions/124611/special-characters-in-samba-filenames)
to add `iocharset=iso8859-1` to the mount options and achieve some success. If
this works for you please report it so I can update this documentation.

## Run Codex

Return to the Main [README](README.md#administration) for help running and
administering Codex.

## Special Thanks

- To [ProfessionalTart](https://github.com/professionaltart) for providing the
  majority of these instructions.
## Run Codex

Return to the Main [README](README.md#administration) for help running and
administering Codex.

## Special Thanks

- To [ProfessionalTart](https://github.com/professionaltart) for providing
  the majority of these instructions.

================================================
FILE: docs/requirements.txt
================================================
mkdocs~=1.6
mkdocs-minify-plugin~=0.8

================================================
FILE: docs/style.material.css
================================================
/* material */
[data-md-color-scheme="codex"] {
  --md-primary-fg-color: rgb(204, 123, 25);
  --md-primary-bg-color: rgb(18, 18, 18);
  --md-accent-fg-color: #d3d3d3;
  --md-accent-bg-color: #965b13;
}

.md-header,
.md-tabs {
  --md-primary-fg-color: #965b13;
}

================================================
FILE: docs/style.mkdocs.css
================================================
/* mkdocs */
body,
body::before {
  --bs-body-color: rgb(200, 200, 200);
  --bs-body-bg: rgb(25, 25, 25);
}

a {
  text-decoration: none;
  color: rgb(204, 123, 25);
}

.nav-link {
  --bs-nav-link-color: rgb(204, 123, 25);
}

.navbar .nav-link {
  --bs-nav-link-color: #d3d3d3;
}

a:hover,
.nav-link:hover,
.navbar .nav-link:hover {
  color: white;
}

.navbar.bg-primary {
  background-image: none;
}

.bg-primary {
  background-color: #965b13 !important;
}

.form-control:focus {
  border-color: rgb(204, 123, 25);
  box-shadow: 0 0 0 0.25rem rgba(204, 123, 25, 0.25);
}

================================================
FILE: docs/style.readthedocs.css
================================================
/* readthedocs */
.wy-nav-content-wrap,
.wy-nav-side,
.wy-side-nav-search,
.wy-nav-content,
.wy-menu,
.wy-menu-vertical,
.toctree-l1,
.toctree-l1 > a.internal,
.toctree-l2,
.toctree-l2 > a.internal,
.toctree-l3,
.toctree-l3 > a.internal,
.toctree-l4,
.toctree-l4 > a.internal,
.wy-menu-vertical li.current > a,
.wy-menu-vertical li.on > a,
.toctree-expand {
  background-color: #222 !important;
  color: #d3d3d3;
}

================================================
FILE: eslint.config.js
================================================
import { defineConfig } from "eslint/config";
import baseConfig from "./cfg/eslint.config.base.js";
import eslintPluginVue from "eslint-plugin-vue";
import eslintPluginVitest from "@vitest/eslint-plugin";
import eslintPluginVueScopedCSS from "eslint-plugin-vue-scoped-css";
import eslintPluginConfigPrettier from "eslint-config-prettier";
import path from "path";
import { fileURLToPath } from "url";

const __dirname = path.dirname(fileURLToPath(import.meta.url));

export default defineConfig([
  {
    name: "codexIgnores",
    ignores: [
      "codex/_vendor/",
      "codex/static_build/",
      "codex/static/",
      "codex/templates/*.html", // Handled by djlint
      "codex/templates/**/*.html", // Handled by djlint
      "codex/templates/pwa/serviceworker-register.js", // removes eslint-disable that it then complains about
      // "frontend",
    ],
  },
  ...baseConfig,
  ...eslintPluginVue.configs["flat/recommended"].map((c) => ({
    ...c,
    files: ["**/*.vue"],
  })),
  ...eslintPluginVueScopedCSS.configs.all.map((c) => ({
    ...c,
    files: ["**/*.vue"],
  })),
  eslintPluginConfigPrettier, // Again last after adding other plugins.
{ files: ["frontend/**/*.{js,vue}"], rules: { "no-console": [ "warn", { allow: ["clear", "debug", "info", "warn", "error"] }, ], "no-secrets/no-secrets": [ "error", { ignoreContent: [ "notify_groups_changed", "notify_failed_imports_changed", ], }, ], }, settings: { "import/extensions": [".js", ".vue"], "import/parsers": { "vue-eslint-parser": [".vue"], "@eslint/json": [".json"], }, "import/resolver": { alias: { map: [["@", path.resolve(__dirname, "frontend/src")]] }, }, }, }, { files: ["eslint.config.js", "cfg/eslint.config.js"], rules: { "no-secrets/no-secrets": "off", }, }, { files: ["frontend/src/choices/browser-map.json"], rules: { "json/no-empty-keys": "off" }, }, { files: ["frontend/tests/**"], ...eslintPluginVitest.configs.recommended }, { files: ["tests/files/comicbox.update.yaml"], rules: { "yml/no-empty-mapping-value": "off", }, }, ]); ================================================ FILE: frontend/.gitignore ================================================ __pycache__/ __pypackages__/ __snapshots__ .*cache .claude .coverage* .dmypy.json .docker-token .DS_Store .eggs/ .env .env-* .eslintcache .hypothesis/ .installed.cfg .ipynb_checkpoints .mypy_cache/ .nox/ .npm .pypi-token .pypirc .pyre/ .pytest_cache/ .Python .python-version .ropeproject .ruff_cache/ .scrapy .spyderproject .spyproject .tox/ .uv-publish-env .venv* .webassets-cache *.cover *.egg *.egg-info/ *.log *.manifest *.mo *.pot *.py,cover *.py[cod] *.sage.py *.so *.spec *~ *$py.class build build/ celerybeat-schedule celerybeat.pid coverage.xml develop-eggs/ dist dist/ dmypy.json docs/_build/ downloads/ eggs/ env.bak/ env/ ENV/ htmlcov/ instance/ ipython_config.py jspm_packages/ lib/ lib64/ local_settings.py MANIFEST monkeytype.sqlite3 node_modules node_modules/ nosetests.xml parts/ pip-delete-this-directory.txt pip-log.txt pip-wheel-metadata/ profile_default/ sdist/ share/python-wheels/ target/ test-results TODO.md var/ venv.bak/ venv/ wheels/ ================================================ FILE: frontend/.npmrc ================================================ node-options=--trace-warnings ================================================ FILE: frontend/.prettierignore ================================================ __pycache__ .*cache/ .circleci .claude .git .venv *Dockerfile components.d.ts coverage dist node_modules package-lock.json public src/choices test-results tests/**/*.json tests/**/*.xml tests/**/*.yaml tests/**/*.yml uv.lock webpack-stats.json ================================================ FILE: frontend/.remarkignore ================================================ __pycache__/ __pypackages__/ __snapshots__ .*cache .claude .coverage* .dmypy.json .docker-token .DS_Store .eggs/ .env .env-* .eslintcache .hypothesis/ .installed.cfg .ipynb_checkpoints .mypy_cache/ .nox/ .npm .pypi-token .pypirc .pyre/ .pytest_cache/ .Python .python-version .ropeproject .ruff_cache/ .scrapy .spyderproject .spyproject .tox/ .uv-publish-env .venv* .webassets-cache *.cover *.egg *.egg-info/ *.log *.manifest *.mo *.pot *.py,cover *.py[cod] *.sage.py *.so *.spec *~ *$py.class build build/ celerybeat-schedule celerybeat.pid coverage.xml develop-eggs/ dist dist/ dmypy.json docs/_build/ downloads/ eggs/ env.bak/ env/ ENV/ htmlcov/ instance/ ipython_config.py jspm_packages/ lib/ lib64/ local_settings.py MANIFEST monkeytype.sqlite3 node_modules node_modules/ nosetests.xml parts/ pip-delete-this-directory.txt pip-log.txt pip-wheel-metadata/ profile_default/ sdist/ share/python-wheels/ target/ test-results TODO.md var/ venv.bak/ venv/ 
wheels/

================================================
FILE: frontend/.shellcheckrc
================================================
external-sources=true

================================================
FILE: frontend/Makefile
================================================
SHELL := /usr/bin/env bash
DEVENV_SRC := ../../devenv

include cfg/codex-frontend.mk
# include cfg/django.mk
# include cfg/frontend.mk
# include cfg/python.mk
# include cfg/ci.mk
# include cfg/docker.mk
# include cfg/docs.mk
include cfg/node.mk
# include cfg/node_root.mk
include cfg/common.mk
include cfg/help.mk

.PHONY: all

================================================
FILE: frontend/README.md
================================================
# Codex Frontend

The codex frontend runs on [VueJS](https://vuejs.org/) and
[Vuetify](https://vuetifyjs.com).

## Development

See the package.json file for common development scripts. Running the live
reloading dev server is your best bet.

## Production

The Django collectstatic script takes the Vite-bundled modules from
`codex/static_build/` and packages them with the main server app in
`codex/static/`.

================================================
FILE: frontend/bin/dev-server.sh
================================================
#!/bin/sh
# Run the live reloading front end development server
THIS_DIR="$(dirname "$0")"
cd "$THIS_DIR" || exit 1
npm run dev

================================================
FILE: frontend/bin/fix.sh
================================================
#!/usr/bin/env bash
# Fix common linting errors
set -euxo pipefail

#####################
###### Makefile #####
#####################
uv run mbake format Makefile cfg/*.mk

################
# Ignore files #
################
bin/sort-ignore.sh

############################################
##### Javascript, JSON, Markdown, YAML #####
############################################
bun run fix

###################
###### Shell ######
###################
shellharden --replace ./**/*.sh

================================================
FILE: frontend/bin/kill-eslint_d.sh
================================================
#!/usr/bin/env bash
# eslint_d can get into a bad state if git switches branches underneath it
npx eslint_d stop
pkill eslint_d
rm -f .eslintcache

================================================
FILE: frontend/bin/lint-darwin.sh
================================================
#!/usr/bin/env bash
# Lint checks
set -euxo pipefail

if [ "$(uname)" != "Darwin" ]; then
  exit 0
fi
shellharden --check ./**/*.sh
# subdirs aren't copied into docker builder
# .env files aren't copied into docker
shellcheck --external-sources ./**/*.sh

================================================
FILE: frontend/bin/lint.sh
================================================
#!/usr/bin/env bash
# Lint checks
set -euxo pipefail

uv run mbake validate Makefile cfg/*.mk

# Javascript, JSON, Markdown, YAML #####
npm run lint
bin/lint-darwin.sh
uv run bin/roman.py -i .prettierignore .

================================================
FILE: frontend/bin/roman.py
================================================
#!/usr/bin/env python3
"""
Check shell scripts recursively for a descriptive comment on line 2.

Detects shell scripts by shebang.
Inspired by @defunctzombie
"""

from __future__ import annotations

import re
import sys
from argparse import ArgumentParser, Namespace, RawDescriptionHelpFormatter
from pathlib import Path
from typing import TYPE_CHECKING

from pathspec import PathSpec

if TYPE_CHECKING:
    from collections.abc import Generator, Sequence

# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------

SHELL_SHEBANG_PATTERN: re.Pattern[str] = re.compile(r"^#!.*sh")

# Patterns that are always excluded regardless of an ignore file.
DEFAULT_EXCLUDE_PATTERNS: list[str] = [
    ".*",  # hidden files / directories
    "*~",  # editor backup files
]

COMMENT_PATTERN: re.Pattern[str] = re.compile(r"^#+.{4}")

# Number of bytes to read when sniffing the shebang — avoids loading huge
# binary files into memory.
SHEBANG_READ_BYTES: int = 512

# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------


def build_ignore_spec(ignore_path: Path | None) -> PathSpec:
    """Return a gitignore style PathSpec built from *ignore_path* plus the built-in defaults."""
    lines: list[str] = list(DEFAULT_EXCLUDE_PATTERNS)
    if ignore_path is not None:
        lines += ignore_path.read_text(encoding="utf-8").splitlines()
    return PathSpec.from_lines("gitwildmatch", lines)


def read_first_two_lines(path: Path) -> tuple[str, str]:
    """Return the first two lines of *path* as a (line1, line2) tuple."""
    try:
        raw = path.read_bytes()[:SHEBANG_READ_BYTES]
        text = raw.decode("utf-8", errors="replace")
    except OSError:
        return "", ""
    lines = text.splitlines()
    line1 = lines[0] if len(lines) > 0 else ""
    line2 = lines[1] if len(lines) > 1 else ""
    return line1, line2


def is_shell_script(line1: str) -> bool:
    """Return True when *line1* looks like a shell shebang."""
    return bool(SHELL_SHEBANG_PATTERN.search(line1))


def has_description_comment(line2: str) -> bool:
    """Return True when *line2* is a comment with at least a few characters of content."""
    return bool(COMMENT_PATTERN.match(line2))


def iter_files(path_strs: Sequence[str], spec: PathSpec) -> Generator[Path]:
    """
    Yield every file under *path_strs* that is not excluded by *spec*.

    Each candidate path is tested relative to the root it was found under so
    that gitignore-style directory patterns (e.g. ``vendor/``) work correctly.
    """
    for path_str in path_strs:
        path = Path(path_str)
        if not path.exists():
            print(f"👎 Path does not exist: {path}", file=sys.stderr)  # noqa: T201
            sys.exit(2)
        root = Path(path).resolve()
        if root.is_file():
            rel = Path(root.name)
            if not spec.match_file(str(rel)):
                yield root
            continue
        for sub_path in sorted(root.rglob("*")):
            if not sub_path.is_file():
                continue
            try:
                rel = sub_path.relative_to(root)
            except ValueError:
                rel = sub_path
            # Match the path relative to its root so directory patterns work
            if spec.match_file(str(rel)):
                continue
            yield sub_path

# ---------------------------------------------------------------------------
# CLI
# ---------------------------------------------------------------------------


def build_parser() -> ArgumentParser:
    """Build cli arg parser."""
    parser = ArgumentParser(
        description="Find shell scripts that are missing a descriptive comment on line 2.",
        formatter_class=RawDescriptionHelpFormatter,
        epilog=(
            "Exit status: 0 if all shell scripts pass, 1 if any are missing\n"
            "a comment on line 2, 2 on usage / IO errors."
        ),
    )
    parser.add_argument(
        "paths",
        nargs="+",
        metavar="PATH",
        help="Files or directories to examine.",
    )
    parser.add_argument(
        "-i",
        "--ignore-file",
        metavar="FILE",
        help="Ignore-file with gitignore-style patterns (e.g. .zombieignore).",
    )
    return parser


def _parse_ignore_file(args: Namespace) -> PathSpec:
    ignore_path: Path | None = None
    if args.ignore_file:
        ignore_path = Path(args.ignore_file)
        if not ignore_path.is_file():
            print(f"👎 Can't read ignore file: {ignore_path}", file=sys.stderr)  # noqa: T201
            sys.exit(2)
    try:
        return build_ignore_spec(ignore_path)
    except OSError as exc:
        print(f"👎 Failed to parse ignore file: {exc}", file=sys.stderr)  # noqa: T201
        sys.exit(2)


def main() -> None:
    """Run program."""
    parser = build_parser()
    args = parser.parse_args()
    spec = _parse_ignore_file(args)
    offenders: list[Path] = []
    for path in iter_files(args.paths, spec):
        line1, line2 = read_first_two_lines(path)
        if not is_shell_script(line1):
            continue
        if not has_description_comment(line2):
            print(f"🔪 {path}")  # noqa: T201
            offenders.append(path)
    if offenders:
        print(  # noqa: T201
            f"\n{len(offenders)} script(s) missing a description comment.",
            file=sys.stderr,
        )
        sys.exit(1)
    print("👍")  # noqa: T201


if __name__ == "__main__":
    main()

================================================
FILE: frontend/bin/sort-ignore.sh
================================================
#!/usr/bin/env bash
# Sort all ignore files in place and remove duplicates
for f in .*ignore; do
  if [ ! -L "$f" ]; then
    sort --mmap --unique --output="$f" "$f"
    echo "$f" sorted
  fi
done

================================================
FILE: frontend/bin/update-deps-node.sh
================================================
#!/usr/bin/env bash
# Update npm dependencies
set -euo pipefail
bun update
bun outdated || true

================================================
FILE: frontend/bin/version-node.sh
================================================
#!/usr/bin/env bash
# Get version or set version in Frontend & API.
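# Usage: version-node.sh [VERSION]
#   With no argument, print the frontend package name and version.
#   With VERSION, set the frontend package version via `npm version`.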
set -euo pipefail
VERSION="${1:-}"
if [ "$VERSION" = "" ]; then
  if [ -d frontend ]; then
    cd frontend
    node -e "const {name, version} = require('./package.json'); console.log(name, version);"
  fi
else
  if [ -d frontend ]; then
    cd frontend
    npm version --allow-same-version "$VERSION"
  fi
fi

================================================
FILE: frontend/cfg/codex-frontend.mk
================================================
SHELL := /usr/bin/env bash

.PHONY: clean
## Remove static_build contents
## @category build
clean::
	rm -rf ../codex/static_build/*

.PHONY: dev-server
## Run Dev Frontend Server
## @category Run
dev-server:
	./bin/dev-server.sh

.PHONY: test
## Run All Tests
## @category Test
test:
	bun run test:ci

.PHONY: build
## Build package
## @category build
build: clean
	bun run build

.PHONY: all

================================================
FILE: frontend/cfg/common.mk
================================================
SHELL := /usr/bin/env bash
DEVENV_SRC ?= ../devenv
export DEVENV_SRC
DEVENV_COMMON := 1
export DEVENV_COMMON

.PHONY: clean
## Clean caches
## @category Clean
clean::
	rm -rf .*cache

.PHONY: update-devenv
## Update development environment
## @category Update
update-devenv:
	$(DEVENV_SRC)/scripts/update_devenv.py

.PHONY: fix
## Fix lint errors
## @category Fix
fix::
	./bin/fix.sh

.PHONY: lint
## Lint
## @category Lint
lint::
	./bin/lint.sh

.PHONY: news
## Show recent NEWS
## @category Deploy
news:
	head -40 NEWS.md

================================================
FILE: frontend/cfg/help.mk
================================================
# Inspired from
# https://github.com/Mischback/django-calingen/blob/3f0e6db6/Makefile
# and https://gist.github.com/klmr/575726c7e05d8780505a

# fancy colors
cyan := "$$(tput setaf 6)"
green := "$$(tput setaf 2)"
red := "$$(tput setaf 1)"
yel := "$$(tput setaf 3)"
gray := "$$(tput setaf 8)"
grayb := "$$(printf "\033[1m"; tput setaf 8)"
end := "$$(tput sgr0)"

TARGET_STYLED_HELP_NAME = "$(cyan)TARGET$(end)"
ARGUMENTS_HELP_NAME = "$(green)ARGUMENT$(end)=$(red)VALUE$(end)"

# This monstrous sed is compatible with both GNU sed and BSD sed (for macOS).
# That's why "-E", "|", "+", "\s", "?", and "\t" aren't used. See the details
# about BSD sed vs. GNU sed: https://riptutorial.com/sed/topic/9436
target_regex := [a-zA-Z0-9%_\/%-][a-zA-Z0-9%_\/%-]*
variable_regex := [^:= ][^:= ]*
variable_assignment_regex := [ ]*:*[+:!\?]*= *
value_regex := .*
category_annotation_regex := @category *
category_regex := [^<][^<]*

# We first parse and markup with these ad-hoc tags, and then we turn the markup
# into a colorful output.
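# For reference, the annotation format this parser consumes looks like the
# following (a hypothetical target; the real .mk files in cfg/ use the same
# ## doc-comment and ## @category convention):
#
#   .PHONY: greet
#   ## Print a greeting
#   ## @category Demo
#   greet:
#   	echo hello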
target_tag_start := target_tag_end := target_variable_tag_start := target_variable_tag_end := variable_tag_start := variable_tag_end := global_variable_tag_start := global_variable_tag_end := value_tag_start := value_tag_end := prerequisites_tag_start := prerequisites_tag_end := doc_tag_start := doc_tag_indented_start := doc_tag_indented_end := doc_tag_end := category_tag_start := category_tag_end := default_category_tag_start := default_category_tag_end := DEFAULT_CATEGORY = General .DEFAULT_GOAL := help .PHONY: help help: @echo "Usage: make [$(TARGET_STYLED_HELP_NAME) [$(TARGET_STYLED_HELP_NAME) ...]] [$(ARGUMENTS_HELP_NAME) [$(ARGUMENTS_HELP_NAME) ...]]" @cat ${MAKEFILE_LIST} \ | tr '\t' ' ' \ | sed -n -e "/^## / { \ h; \ s/.*/##/; \ :doc" \ -e "H; \ n; \ s|^## *\(.*\)|$(doc_tag_start)$(doc_tag_indented_start)\1$(doc_tag_indented_end)$(doc_tag_end)|; \ s|^## *\(.*\)|$(doc_tag_start)\1$(doc_tag_end)|; \ t doc" \ -e "s| *#[^#].*||; " \ -e "s|^\(define *\)\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)|$(global_variable_tag_start)\2$(global_variable_tag_end)$(value_tag_start)\3$(value_tag_end)|;" \ -e "s|^\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)|$(global_variable_tag_start)\1$(global_variable_tag_end)$(value_tag_start)\2$(value_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\(\($(variable_regex)\)$(variable_assignment_regex)\($(value_regex)\)\)|$(target_variable_tag_start)\1$(target_variable_tag_end)$(variable_tag_start)\3$(variable_tag_end)$(value_tag_start)\4$(value_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\($(target_regex)\( *$(target_regex)\)*\) *\(\| *\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)$(prerequisites_tag_start)\2$(prerequisites_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\($(target_regex)\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)$(prerequisites_tag_start)\2$(prerequisites_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *\(\| *\( *$(target_regex)\)*\)|$(target_tag_start)\1$(target_tag_end)|;" \ -e "s|^\($(target_regex)\) *: *|$(target_tag_start)\1$(target_tag_end)|;" \ -e " \ G; \ s|## *\(.*\) *##|$(doc_tag_start)\1$(doc_tag_end)|; \ s|\\n||g;" \ -e "/$(category_annotation_regex)/!s|.*|$(default_category_tag_start)$(DEFAULT_CATEGORY)$(default_category_tag_end)&|" \ -e "s|^\(.*\)$(doc_tag_start)$(category_annotation_regex)\($(category_regex)\)$(doc_tag_end)|$(category_tag_start)\2$(category_tag_end)\1|" \ -e "p; \ }" \ | sort \ | sed -n \ -e "s|$(default_category_tag_start)|$(category_tag_start)|" \ -e "s|$(default_category_tag_end)|$(category_tag_end)|" \ -e "{G; s|\($(category_tag_start)$(category_regex)$(category_tag_end)\)\(.*\)\n\1|\2|; s|\n.*||; H; }" \ -e "s|$(category_tag_start)||" \ -e "s|$(category_tag_end)|:\n|" \ -e "s|$(target_variable_tag_start)|$(target_tag_start)|" \ -e "s|$(target_variable_tag_end)|$(target_tag_end)|" \ -e "s|$(target_tag_start)| $(cyan)|" \ -e "s|$(target_tag_end)|$(end) |" \ -e "s|$(prerequisites_tag_start).*$(prerequisites_tag_end)||" \ -e "s|$(variable_tag_start)|$(green)|g" \ -e "s|$(variable_tag_end)|$(end)|" \ -e "s|$(global_variable_tag_start)| $(green)|g" \ -e "s|$(global_variable_tag_end)|$(end)|" \ -e "s|$(value_tag_start)| (default: $(red)|" \ -e "s|$(value_tag_end)|$(end))|" \ -e "s|$(doc_tag_indented_start)|$(grayb)|g" \ -e "s|$(doc_tag_indented_end)|$(end)|g" \ -e "s|$(doc_tag_start)|\n |g" \ -e "s|$(doc_tag_end)||g" \ -e "p" ================================================ FILE: frontend/cfg/node.mk 
================================================ DEVENV_NODE := 1 export DEVENV_NODE .PHONY: install-deps-node ## Update and install node packages ## @category Install install-deps-node: npm install .PHONY: install ## Install ## @category Install install:: install-deps-node .PHONY: update-node ## Update node dependencies ## @category Update update-node: ./bin/update-deps-node.sh .PHONY: update ## Update dependencies ## @category Update update:: update-node .PHONY: kill-eslint_d ## Kill eslint daemon ## @category Lint kill-eslint_d: bin/kill-eslint_d.sh ## Show version. Use V variable to set version ## @category Update V := .PHONY: version ## Show or set project version for node ## @category Update version:: bin/version-node.sh $(V) ================================================ FILE: frontend/jsconfig.json ================================================ { "exclude": ["node_modules", "**/node_modules"] } ================================================ FILE: frontend/package.json ================================================ { "name": "codex", "version": "1.10.12", "private": true, "description": "ui for codex api", "type": "module", "scripts": { "build": "vite build", "coverage": "vitest run --coverage", "dev": "vite dev", "devBuild": "vite build --mode development", "fix": "eslint_d --cache --fix . && prettier --write .", "lint": "eslint_d --cache . && prettier --check .", "test": "vitest --reporter verbose --update", "test:ci": "vitest run --reporter verbose --update" }, "dependencies": { "@unhead/vue": "^3.0.5", "@vueuse/core": "^14.2.1", "dequal": "^2.0.3", "eslint-import-resolver-oxc": "^0.15.0", "pinia": "^3.0.4", "pretty-bytes": "^7.1.0", "text-case": "^1.2.10", "vue": "^3.5.33", "vue-pdf-embed": "^2.1.4", "vue-router": "^5.0.6", "vuetify": "^4.0.6", "xior": "^0.8.3" }, "devDependencies": { "@mdi/font": "^7.4.47", "@mdi/js": "^7.4.47", "@pinia/testing": "^1.0.3", "@types/node": "^25.6.0", "@unhead/bundler": "^3.0.5", "@vitejs/plugin-vue": "^6.0.6", "@vue/test-utils": "^2.4.6", "@vue/typescript-plugin": "^3.2.7", "happy-dom": "^20.9.0", "sass": "^1.99.0", "sass-loader": "^16.0.7", "toml": "^4.1.1", "typeface-roboto": "^1.1.13", "vite": "^8.0.10", "vite-plugin-checker": "^0.13.0", "vite-plugin-dynamic-base": "^1.3.0", "vite-plugin-run": "^0.9.0", "vite-plugin-vuetify": "^2.1.3", "vitest": "^4.1.5", "vue-eslint-parser": "^10.4.0" } } ================================================ FILE: frontend/src/admin.vue ================================================ ================================================ FILE: frontend/src/api/v3/admin.js ================================================ import { serializeParams } from "@/api/v3/common"; import { HTTP } from "./base"; // CRUD factory — generates create/getAll/update/destroy for a given admin entity. 
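// For example, with a hypothetical entity name "widget", makeAdminCRUD("widget")
// returns handlers for POST /admin/widget, GET /admin/widget,
// PUT /admin/widget/{pk}/, and DELETE /admin/widget/{pk}/.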
const makeAdminCRUD = (entity) => { const path = `/admin/${entity}`; return { create: (data) => HTTP.post(path, data), getAll: () => HTTP.get(path, { params: serializeParams() }), update: (pk, data) => HTTP.put(`${path}/${pk}/`, data), destroy: (pk) => HTTP.delete(`${path}/${pk}/`), }; }; const userCRUD = makeAdminCRUD("user"); const groupCRUD = makeAdminCRUD("group"); const libraryCRUD = makeAdminCRUD("library"); // ONE-OFF ENDPOINTS const changeUserPassword = (pk, data) => { return HTTP.put(`/admin/user/${pk}/password`, data); }; const getFolders = (path, showHidden) => { const params = { path, showHidden }; return HTTP.get("/admin/folders", { params }); }; const getFailedImports = () => { const params = serializeParams(); return HTTP.get("/admin/failed-import", { params }); }; const getFlags = () => { const params = serializeParams(); return HTTP.get("/admin/flag", { params }); }; const updateFlag = (key, data) => { return HTTP.put(`/admin/flag/${key}/`, data); }; const postLibrarianTask = async (data) => { return await HTTP.post("/admin/librarian/task", data); }; const getActiveLibrarianStatuses = () => { const params = { ts: Date.now() }; return HTTP.get("/admin/librarian/status", { params }); }; const getAllLibrarianStatuses = () => { const params = { ts: Date.now() }; return HTTP.get("/admin/librarian/status/all", { params }); }; const getStats = () => { const params = { ts: Date.now() }; return HTTP.get("/admin/stats", { params }); }; const updateAPIKey = async () => { return await HTTP.put("/admin/api_key"); }; // Preserve the original function-name keys for dynamic lookup by the admin store // (e.g. API["create" + table], API["get" + pluralTable]). export default { createUser: userCRUD.create, getUsers: userCRUD.getAll, updateUser: userCRUD.update, deleteUser: userCRUD.destroy, changeUserPassword, createGroup: groupCRUD.create, getGroups: groupCRUD.getAll, updateGroup: groupCRUD.update, deleteGroup: groupCRUD.destroy, createLibrary: libraryCRUD.create, getLibraries: libraryCRUD.getAll, updateLibrary: libraryCRUD.update, deleteLibrary: libraryCRUD.destroy, getActiveLibrarianStatuses, getAllLibrarianStatuses, getFailedImports, getFlags, getFolders, getStats, postLibrarianTask, updateAPIKey, updateFlag, }; ================================================ FILE: frontend/src/api/v3/auth.js ================================================ import { serializeParams } from "@/api/v3/common"; import { HTTP } from "./base"; const getAdminFlags = async () => { return await HTTP.get("/auth/flags/"); }; const get_tz = () => new Intl.DateTimeFormat().resolvedOptions().timeZone; const updateTimezone = async () => { const data = { timezone: get_tz(), }; return await HTTP.put("/auth/timezone/", data); }; const register = async (credentials) => { credentials.login = credentials.username; return await HTTP.post("/auth/register/", credentials); }; const login = async (credentials) => { credentials.login = credentials.username; return await HTTP.post("/auth/login/", credentials); }; const getProfile = async () => { const params = serializeParams(); return await HTTP.get("/auth/profile/", { params }); }; const logout = async () => { return await HTTP.post("/auth/logout/"); }; const updatePassword = async (credentials) => { return await HTTP.post("/auth/change-password/", credentials); }; const getToken = async () => { return await HTTP.get("/auth/token/"); }; const updateToken = async () => { return await HTTP.put("/auth/token/"); }; export default { updatePassword, getAdminFlags, getProfile, getToken, 
login, logout, register, updateTimezone, updateToken, }; ================================================ FILE: frontend/src/api/v3/base.js ================================================ import xior, { merge } from "xior"; const CONFIG = { baseURL: globalThis.CODEX.API_V3_PATH, withCredentials: true, }; export const HTTP = xior.create(CONFIG); // Default Django CSRF token const COOKIE_NAME = "csrftoken"; const CSRF_HEADER = "X-CSRFToken"; const CSRF_COOKIE_REGEX = RegExp("(?:^|;)\\s*" + COOKIE_NAME + "=([^;]*)"); HTTP.interceptors.request.use((config) => { const match = document.cookie.match(CSRF_COOKIE_REGEX); const token = match ? match[1] : ""; if (!token) return config; return merge(config, { headers: { [CSRF_HEADER]: token, }, }); }); /* * When versions of django change users are sometimes caught with CSRF errors * And only logging out fixes it. */ HTTP.interceptors.response.use( (response) => response, async (error) => { // Intercept all errors if ( error.response?.status === 403 && typeof error.response.data === "string" && error.response.data.includes("CSRF") ) { // CSRF failure — delete the sessionid cookie await cookieStore.delete("sessionid"); console.error("CSRF response error. Deleted login cookie."); } return Promise.reject(error); }, ); ================================================ FILE: frontend/src/api/v3/browser.js ================================================ import { serializeParams } from "@/api/v3/common"; import { HTTP } from "./base"; import { toRaw } from "vue"; const getBrowserHrefPath = ({ group, pks, query, ts }) => { const params = serializeParams(query, ts); const queryString = new URLSearchParams(params).toString(); const pkList = pks.join(","); return { hrefPath: `${group}/${pkList}`, queryString }; }; export const getBrowserHref = ({ group, pks, query }) => { const base = globalThis.CODEX.APP_PATH; const { hrefPath, queryString } = getBrowserHrefPath({ group, pks, query, }); return `${base}${hrefPath}/1?${queryString}`; }; export const getCoverSrc = ({ group, pks }, settings, ts) => { const base = globalThis.CODEX.API_V3_PATH; delete settings.show; const { hrefPath, queryString } = getBrowserHrefPath({ group, pks, query: settings, ts, }); return `${base}${hrefPath}/cover.webp?${queryString}`; }; const getAvailableFilterChoices = ({ group, pks }, data, ts) => { const params = serializeParams(data, ts); return HTTP.get(`/${group}/${pks}/choices_available`, { params }); }; const getFilterChoices = ({ group, pks }, fieldName, data, ts) => { const params = serializeParams(data, ts); return HTTP.get(`/${group}/${pks}/choices/${fieldName}`, { params }); }; const getBrowserPage = ({ group, pks, page }, data, ts) => { const params = serializeParams(data, ts, false); return HTTP.get(`/${group}/${pks}/${page}`, { params }); }; const getMetadata = ({ group, pks }, settings) => { const pkList = pks.join(","); const rawSettings = toRaw(settings) || {}; const filters = toRaw(rawSettings?.filters) || {}; const mtime = rawSettings?.mtime; const data = structuredClone({ ...rawSettings, filters }); delete data.mtime; const params = serializeParams(data, mtime, false); return HTTP.get(`/${group}/${pkList}/metadata`, { params }); }; const getSettings = (data) => { const params = serializeParams(data); return HTTP.get("/r/settings", { params }); }; const updateSettings = (settings) => { const params = serializeParams(settings, undefined, false); return HTTP.patch("/r/settings", { params }); }; const resetSettings = () => { return HTTP.delete("/r/settings"); }; export 
const getGroupDownloadURL = ({ group, pks }, fn, settings, ts) => { const base = globalThis.CODEX.API_V3_PATH; delete settings.show; const { hrefPath, queryString } = getBrowserHrefPath({ group, pks, query: settings, ts, }); fn = encodeURIComponent(fn); return `${base}${hrefPath}/download/${fn}?${queryString}`; }; const updateGroupBookmarks = ({ group, ids }, settings, updates) => { const params = serializeParams(settings); const queryString = new URLSearchParams(params).toString(); if (updates.fitTo === null) { updates.fitTo = ""; } const pkList = ids.join(","); return HTTP.patch(`${group}/${pkList}/bookmark?${queryString}`, updates); }; const getLazyImport = ({ group, pks }) => { return HTTP.get(`/${group}/${pks}/import`); }; const getSavedSettingsList = () => { return HTTP.get("/r/settings/saved"); }; const saveSettings = (name) => { return HTTP.post("/r/settings/saved", { name }); }; const loadSavedSettings = (pk) => { return HTTP.get(`/r/settings/saved/${pk}`); }; const deleteSavedSettings = (pk) => { return HTTP.delete(`/r/settings/saved/${pk}`); }; export default { getAvailableFilterChoices, getBrowserHref, getCoverSrc, getFilterChoices, getGroupDownloadURL, getMetadata, getSettings, getBrowserPage, getLazyImport, updateGroupBookmarks, resetSettings, updateSettings, getSavedSettingsList, saveSettings, loadSavedSettings, deleteSavedSettings, }; ================================================ FILE: frontend/src/api/v3/common.js ================================================ import { toRaw } from "vue"; import { useCommonStore } from "@/stores/common"; import { HTTP } from "./base"; const map = (obj, func) => { // Generic map for arrays and objects switch (obj?.constructor) { case Array: return obj.map((x) => func(x)); case Object: return Object.fromEntries( Object.entries(obj).map(([key, val]) => [key, func(val, key)]), ); default: return obj; } }; const filter = (obj, func) => { // Generic filter for arrays and objects switch (obj?.constructor) { case Array: return obj.filter((x) => func(x)); case Object: return Object.fromEntries( Object.entries(obj).filter(([key, val]) => func(val, key)), ); default: return obj; } }; const _keepIfNotEmpty = (val) => { // Keep flag for filter if (val === undefined) return false; switch (val?.constructor) { case Array: case String: return val.length > 0; case Object: return Object.keys(val).length > 0; default: return true; } }; const _deepClone = (obj, filterEmpty = false) => { // Deep clone vue proxyObjects and optionally filter empty elements. obj = toRaw(obj); const _keep = (val) => { // Keep flag for filter, closure for filterEmpty flag. return !filterEmpty || _keepIfNotEmpty(val); }; switch (obj?.constructor) { case Array: return obj.map((v) => _deepClone(v, filterEmpty)).filter(_keep); case Object: const result = {}; for (const [key, val] of Object.entries(obj)) { const clonedVal = _deepClone(val, filterEmpty); if (_keep(clonedVal)) result[key] = clonedVal; } return result; default: return obj; } }; const _jsonSerialize = (params) => { // Since axios 1.0 I've had to manually serialize complex objects. Also with xior. 
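  // Default query-string encoding would mangle nested arrays/objects
  // (e.g. into "[object Object]"), so JSON-encode those values instead.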
  for (const [key, value] of Object.entries(params)) {
    switch (value?.constructor) {
      case Array:
      case Object:
        params[key] = JSON.stringify(value);
    }
  }
};

const _addTimestamp = (params, ts) => {
  if (!ts) {
    ts = useCommonStore().timestamp;
  }
  params.ts = ts;
};

export const serializeParams = (data, ts, filterEmpty = true) => {
  const params = _deepClone(data, filterEmpty) || {};
  _jsonSerialize(params);
  _addTimestamp(params, ts);
  return params;
};

export const getDownloadIOSPWAFix = (href, filename) => {
  /*
   * iOS has a download bug inside PWAs. The user is trapped in the
   * download screen and cannot return to the app.
   * https://developer.apple.com/forums/thread/95911
   * This works around that by creating a temporary blob link which
   * makes the PWA display browser back controls
   */
  HTTP.get(href, { responseType: "blob" })
    .then((response) => {
      const link = document.createElement("a");
      const blob = new Blob([response.data], {
        type: "application/octet-stream",
      });
      link.href = globalThis.URL.createObjectURL(blob);
      link.download = filename;
      link.click();
      // Revoke the temporary object URL created above.
      globalThis.URL.revokeObjectURL(link.href);
      return link.remove();
    })
    .catch(console.warn);
};

const getMtime = (groups, settings) => {
  const params = serializeParams({ groups, ...settings }, Date.now());
  return HTTP.get("/mtime", { params });
};

const getOPDSURLs = () => {
  return HTTP.get("/opds-urls");
};

const getVersions = (ts) => {
  const params = { ts };
  return HTTP.get("/version", { params });
};

export default {
  getDownloadIOSPWAFix,
  getMtime,
  getOPDSURLs,
  getVersions,
};

================================================
FILE: frontend/src/api/v3/notify.js
================================================
// Notifications and websockets
export const WS_PATH = `${globalThis.CODEX.API_V3_PATH}ws`;

function getSocketURL() {
  const protocol = window.location.protocol === "https:" ? "wss:" : "ws:";
  const url = `${protocol}//${window.location.host}${WS_PATH}`;
  console.debug("[socket] Connecting...");
  return url;
}

export const WS_URL = getSocketURL();

// This MUST export itself
export default {
  WS_URL,
};

================================================
FILE: frontend/src/api/v3/reader.js
================================================
import { serializeParams } from "@/api/v3/common";
import { HTTP } from "./base";

const _getBookPath = (pk) => {
  return `c/${pk}`;
};

const getSettings = (pk, scopes, storyArcPk) => {
  const basePath = pk ? `${_getBookPath(pk)}/settings` : "c/settings";
  const queryParams = { scopes: scopes.join(",") };
  if (storyArcPk) {
    queryParams.story_arc_pk = storyArcPk;
  }
  const params = serializeParams(queryParams);
  return HTTP.get(basePath, { params });
};

const updateSettings = (data) => {
  return HTTP.patch("c/settings", data);
};

const resetSettings = (data) => {
  return HTTP.delete("c/settings", { data });
};

const getReaderInfo = (pk, data, ts) => {
  const params = serializeParams(data, ts);
  const bookPath = _getBookPath(pk);
  return HTTP.get(bookPath, { params });
};

const _getReaderAPIPath = (pk) => {
  return globalThis.CODEX.API_V3_PATH + _getBookPath(pk);
};

export const getComicPageSource = ({ pk, page, mtime }) => {
  const bookAPIPath = _getReaderAPIPath(pk);
  return `${bookAPIPath}/${page}/page.jpg?ts=${mtime}`;
};
export const getComicDownloadURL = ({ pk }, fn, ts) => {
  // Gets used by an HTTP.get so already has base path.
  const bookPath = _getBookPath(pk);
  fn = fn ? encodeURIComponent(fn) : `comic-${pk}.cbz`;
  return `${bookPath}/download/${fn}?ts=${ts}`;
};

export const getDownloadPageURL = ({ pk, page, mtime }) => {
  // Gets used by an HTTP.get so already has base path.
  const bookPath = _getBookPath(pk);
  return `${bookPath}/${page}/page.jpg?ts=${mtime}`;
};

export const getPDFInBrowserURL = ({ pk, mtime }) => {
  // Raw URL needs leading slash
  const bookPath = _getBookPath(pk);
  return `/${bookPath}/book.pdf?ts=${mtime}`;
};

export default {
  getReaderInfo,
  getSettings,
  getPDFInBrowserURL,
  resetSettings,
  updateSettings,
};

================================================
FILE: frontend/src/api/v3/vuetify-items.js
================================================
// Shared functions for most metadata components.
import { VUETIFY_NULL_CODE } from "@/choices/browser-choices.json";

export const NULL_PKS = new Set(["", VUETIFY_NULL_CODE, undefined, null]);

const toVuetifyItem = function (item) {
  /*
   * Translates a raw value or an item object into a vuetify item.
   * Removes nulls; they're detected directly from the choices source.
   */
  let vuetifyItem;
  if (NULL_PKS.has(item)) {
    vuetifyItem = item;
  } else if (item instanceof Object) {
    if ("ids" in item) {
      const idSet = new Set(item.ids);
      if (NULL_PKS.intersection(idSet).size > 0) {
        vuetifyItem = undefined;
      } else {
        const value = item.ids.join(",");
        vuetifyItem = { value, title: item.name };
      }
    } else if (NULL_PKS.has(item.pk)) {
      vuetifyItem = { value: item.pk, title: "None" };
    } else {
      vuetifyItem = { value: item.pk, title: item.name };
    }
  } else {
    vuetifyItem = { value: item, title: item.toString() };
  }
  if (item?.url) {
    vuetifyItem.url = item.url;
  }
  return vuetifyItem;
};

const vuetifyItemCompare = function (itemA, itemB) {
  return itemA.title.localeCompare(itemB.title);
};

const vuetifyItemCompareNumeric = function (itemA, itemB) {
  return Number.parseFloat(itemA.title) - Number.parseFloat(itemB.title);
};

export const toVuetifyItems = function ({
  items,
  filter,
  numeric = false,
  sort = true,
}) {
  /*
   * Take a value (which can be a list) and a list of items and
   * return a list of valid vuetify items, with the items arg having preference.
   */
  const sourceItems = items || [];
  // Case insensitive search for filter-sub-menu
  const lowerCaseFilter = filter ? filter.toLowerCase() : filter;
  let noneItem;
  let computedItems = [];
  for (const item of sourceItems) {
    const vuetifyItem = toVuetifyItem(item);
    if (
      vuetifyItem != undefined &&
      (!lowerCaseFilter ||
        vuetifyItem?.title?.toLowerCase().includes(lowerCaseFilter))
    ) {
      if (NULL_PKS.has(vuetifyItem.value)) {
        noneItem = vuetifyItem;
        noneItem.value = VUETIFY_NULL_CODE;
      } else {
        computedItems.push(vuetifyItem);
      }
    }
  }
  if (sort) {
    const sortFunc = numeric ? vuetifyItemCompareNumeric : vuetifyItemCompare;
    computedItems = computedItems.sort(sortFunc);
  }
  if (noneItem) {
    // Prepend noneItem if it exists.
computedItems.unshift(noneItem); } return computedItems; }; export default { toVuetifyItems, NULL_PKS, }; ================================================ FILE: frontend/src/app.vue ================================================ ================================================ FILE: frontend/src/browser.vue ================================================ ================================================ FILE: frontend/src/comic-name.js ================================================ // Create comic names export const formattedVolumeName = function (name, numberTo) { let fmtName; if (name != null && !Number.isNaN(name)) { let compoundName = name; if (numberTo != null && !Number.isNaN(numberTo)) { compoundName += "-" + numberTo; } fmtName = name.length === 4 ? `(${compoundName})` : `v${compoundName}`; } else { fmtName = ""; } return fmtName; }; export const formattedIssue = function ({ issueNumber, issueSuffix }, zeroPad) { let issueStr; try { if (issueNumber == undefined && !issueSuffix) { // Null issue defaults to display #0 issueNumber = 0; } const floatIssue = Number.parseFloat(issueNumber); const intIssue = Math.floor(floatIssue); if (zeroPad === undefined) { zeroPad = 0; } if (floatIssue === intIssue) { issueStr = intIssue.toString(); } else { issueStr = floatIssue.toString(); zeroPad += issueStr.split(".")[1].length + 1; } issueStr = issueStr.padStart(zeroPad, "0"); } catch { issueStr = ""; } if (issueSuffix) { issueStr += issueSuffix; } return issueStr; }; export const getIssueName = function ( { issueNumber, issueSuffix, issueCount }, zeroPad, ) { let issueName = "#" + formattedIssue({ issueNumber, issueSuffix }, zeroPad); if (issueCount) { issueName += ` of ${issueCount}`; } return issueName; }; export const getFullComicName = function ( { seriesName, volumeName, volumeNumberTo, issueNumber, issueSuffix, issueCount, }, zeroPad, ) { // Format a full comic name from the series on down. const fvn = formattedVolumeName(volumeName, volumeNumberTo); const issueName = getIssueName( { issueNumber, issueSuffix, issueCount }, zeroPad, ); return [seriesName, fvn, issueName].filter(Boolean).join(" "); }; export default { getFullComicName, formattedVolumeName, getIssueName, }; ================================================ FILE: frontend/src/components/admin/admin-header.vue ================================================ ================================================ FILE: frontend/src/components/admin/browser-link.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/create-update-button.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/create-update-dialog.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/create-update-inputs-mixin.js ================================================ import { mapActions } from "pinia"; import { toRaw } from "vue"; import { useAdminStore } from "@/stores/admin"; /** * Mixin for create-update input components. * * Required options for importing components: * EMPTY_ROW – with default field values * UPDATE_KEYS – the keys sent on update * * Provides: * props.oldRow, emits["change"], data.row, * watchers that sync row and oldRow and emit "change", * and the nameSet action from the admin store. 
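 *
 * Usage sketch (hypothetical inputs component; the EMPTY_ROW/UPDATE_KEYS
 * values here are examples only):
 *
 *   export default {
 *     mixins: [createUpdateInputsMixin],
 *     EMPTY_ROW: { name: "" },
 *     UPDATE_KEYS: ["name"],
 *   };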
*/ export default { props: { oldRow: { type: [Object, Boolean], default: false, }, }, emits: ["change"], data() { const emptyRow = this.$options.EMPTY_ROW; return { row: this.oldRow ? { ...structuredClone(emptyRow), ...structuredClone(toRaw(this.oldRow)), } : structuredClone(emptyRow), }; }, watch: { row: { handler(to) { this.$emit("change", to); }, deep: true, }, oldRow: { handler(to) { const emptyRow = this.$options.EMPTY_ROW; this.row = to ? { ...structuredClone(emptyRow), ...structuredClone(toRaw(to)) } : structuredClone(emptyRow); }, deep: true, }, }, methods: { ...mapActions(useAdminStore, ["nameSet"]), }, }; ================================================ FILE: frontend/src/components/admin/create-update-dialog/duration-input.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/group-create-update-inputs.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/library-create-update-inputs.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/relation-picker.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/server-folder-picker.vue ================================================ ================================================ FILE: frontend/src/components/admin/create-update-dialog/user-create-update-inputs.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/admin-menu.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/admin-settings-button-progress.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/admin-settings-drawer.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/admin-settings-panel.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/status-list-item.vue ================================================ ================================================ FILE: frontend/src/components/admin/drawer/status-list.vue ================================================ ================================================ FILE: frontend/src/components/admin/group-chip.vue ================================================ ================================================ FILE: frontend/src/components/admin/status-helpers.js ================================================ /** * Shared status display helpers for admin status components. * * Used by: job-tab.vue, status-list-item.vue, stats-table.vue */ import STATUS_TITLES from "@/choices/admin-status-titles.json"; import { getFormattedDuration, NUMBER_FORMAT } from "@/datetime"; /** Format an integer for display, or "?" if not an integer. */ export const nf = (val) => { return Number.isInteger(val) ? NUMBER_FORMAT.format(val) : "?"; }; /** Whether a status has displayable numeric progress. 
*/ export const hasNumbers = (status) => { return Number.isInteger(status.complete) || Number.isInteger(status.total); }; /** Whether the progress bar should be indeterminate. */ export const isIndeterminate = (status) => { return status.active && (!status.total || !Number.isInteger(status.complete)); }; /** Compute 0–100 progress percentage for a status. */ export const statusProgress = (status) => { if (!status.total || isIndeterminate(status)) { return 0; } return (100 * +status.complete) / +status.total; }; /** Human-readable title for a statusType code. */ export const statusTitle = (statusType) => { return STATUS_TITLES[statusType] || statusType; }; /** Duration string since status became active, or "pending". */ export const statusDuration = (status, now) => { if (status.active) { const activeTime = new Date(status.active).getTime(); return getFormattedDuration(activeTime, now); } if (status.preactive) { return "pending"; } return ""; }; /** "X ago" string since status.updatedAt, or "". */ export const statusUpdatedAgo = (status, now) => { if (!status.updatedAt) { return ""; } const updatedTime = new Date(status.updatedAt).getTime(); const ago = getFormattedDuration(updatedTime, now); return `${ago} ago`; }; ================================================ FILE: frontend/src/components/admin/tabs/admin-table.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/custom-covers-panel.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/datetime-column.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/delete-row-dialog.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/failed-imports-panel.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/flag-descriptions.json ================================================ { "AU": "If enabled, codex will attempt to update the codex python package once a day and restart the server if an update occurred. Not advisable if running from Docker as running containers aren't meant to hold state and will be rebuilt if you update the image. Look into services that automatically update docker images instead.", "NU": "By default all Codex features, including bookmarking, are available to users who are not logged in. You may disable this feature and Codex will hide its browser and reader and disable its API and OPDS from anyone who is not logged in as a user.", "RG": "By default users' bookmarks and preferences are saved in an anonymous browser session. Users can create a username and password to save their bookmarks between browsers. You may disable this feature. Admins may still create users.", "FV": "By default, codex provides a \"Folder View\" which mimics the directory hierarchy of the libraries that you've added to Codex. You may disable this feature. The database style browser view is always available. This flag also enables and disables the \"Filename\" sort option and the path from search indexing.", "IM": "If disabled, Codex will not bulk import metadata from comic libraries when they are scanned and only import folders and filenames. 
This makes importing comics fast, but disables most of the metadata browser and search functionality for lack of data, making codex mostly a file tree browser.", "LI": "Import metadata for books that have none one at a time when you hover over the 🏷 button. On many systems this will happen fast enough that clicking the button will show populated metadata. This gives users individual metadata on demand for setups that have Bulk Import Metadata disabled above and lets them slowly populate the database. Eventually there might be enough data for search and filtering to be useful.", "ST": "Anonymously send the contents of the Admin Stats tab once a week to understand how people use Codex and improve the software. The server collecting the stats does not log or store your IP. The author considers all personally identifying information and the names and metadata of your comics to be a radioactive privacy violation and will never be exposed to it. Only the information you see on the stats page is collected. Your API Key is not sent. Stats are not sent for the first 24 hours to allow opt-out.", "BT": "A customized text headline that appears at the top of every codex page." } ================================================ FILE: frontend/src/components/admin/tabs/flag-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/group-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/job-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/library-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/library-table.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/relation-chips.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/stats-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/stats-table.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/tabs.vue ================================================ ================================================ FILE: frontend/src/components/admin/tabs/user-tab.vue ================================================ ================================================ FILE: frontend/src/components/admin/use-now-timer.js ================================================ /** * Reactive "now" timestamp that ticks every second. * * Used by: job-tab.vue, status-list.vue */ import { ref, onMounted, onUnmounted } from "vue"; const TICK_INTERVAL_MS = 1000; /** * Returns a reactive `now` ref (Date.now()) that auto-updates every second * while the component is mounted. Cleans up on unmount. 
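 *
 * Usage sketch (hypothetical consuming component):
 *
 *   const { now } = useNowTimer();
 *   // now.value is a fresh Date.now() reading, updated once per second.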
 */
export function useNowTimer() {
  const now = ref(Date.now());
  let timer = 0;
  const start = () => {
    stop();
    timer = globalThis.setInterval(() => {
      now.value = Date.now();
    }, TICK_INTERVAL_MS);
  };
  const stop = () => {
    if (timer) {
      globalThis.clearInterval(timer);
      timer = 0;
    }
  };
  onMounted(start);
  onUnmounted(stop);
  return { now };
}

================================================
FILE: frontend/src/components/anchors.scss
================================================
a,
a > .v-icon {
  color: rgb(var(--v-theme-primary)) !important;
}

/*
a:visited,
a:visited > .v-icon {
  color: rgb(var(--v-theme-secondary)) !important;
}
*/

a:hover,
a:hover > .v-icon {
  color: rgb(var(--v-theme-textPrimary)) !important;
}

a.v-btn > .v-icon {
  color: rgb(var(--v-theme-textPrimary));
}

================================================
FILE: frontend/src/components/auth/auth-form-mixin.js
================================================
/**
 * Mixin for auth form dialogs: login, change-password.
 *
 * Provides:
 *  - setup(): blocks keyup propagation while dialog is open (via useEventListener)
 *  - data.submitButtonEnabled
 *  - Deep watcher on `credentials` that auto-validates via $refs.form
 *  - Computed: formErrors, formSuccess from commonStore
 *
 * Using components must provide:
 *  - data.credentials (their own shape)
 *  - data.rules
 *  - A `<v-form ref="form">` in their template
 */
import { mapState } from "pinia";
import { useEventListener } from "@vueuse/core";
import { useCommonStore } from "@/stores/common";

export default {
  setup() {
    // Prevent keystrokes from leaking through dialogs to underlying views
    // (e.g. reader keyboard shortcuts).
    useEventListener(globalThis, "keyup", (event) => {
      event.stopImmediatePropagation();
    });
  },
  data() {
    return {
      submitButtonEnabled: false,
    };
  },
  computed: {
    ...mapState(useCommonStore, {
      formErrors: (state) => state.form.errors,
      formSuccess: (state) => state.form.success,
    }),
  },
  watch: {
    credentials: {
      handler() {
        const form = this.$refs.form;
        if (!form) {
          this.submitButtonEnabled = false;
          return;
        }
        form
          .validate()
          .then(({ valid }) => {
            this.submitButtonEnabled = valid;
            return this.submitButtonEnabled;
          })
          .catch(() => {
            this.submitButtonEnabled = false;
          });
      },
      deep: true,
    },
  },
};

================================================
FILE: frontend/src/components/auth/auth-menu.vue
================================================

================================================
FILE: frontend/src/components/auth/auth-token.vue
================================================

================================================
FILE: frontend/src/components/auth/change-password-dialog.vue
================================================

================================================
FILE: frontend/src/components/auth/login-dialog.vue
================================================

================================================
FILE: frontend/src/components/banner.vue
================================================

================================================
FILE: frontend/src/components/book-cover.scss
================================================
@use "sass:math";

$cover-ratio: 1.5372233400402415; /* Modal cover ratio */
$cover-width: 165px;
$cover-height: math.round(calc($cover-ratio * $cover-width));
$small-cover-width: 100px;
$small-cover-height: math.round(calc($cover-ratio * $small-cover-width));

================================================
FILE: frontend/src/components/book-cover.vue
================================================

================================================ FILE:
frontend/src/components/browser/browser-header.vue ================================================ ================================================ FILE: frontend/src/components/browser/card/browser-card-menu.vue ================================================ ================================================ FILE: frontend/src/components/browser/card/card.vue ================================================ ================================================ FILE: frontend/src/components/browser/card/controls.vue ================================================ ================================================ FILE: frontend/src/components/browser/card/order-by-caption.vue ================================================ ================================================ FILE: frontend/src/components/browser/card/subtitle.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-covers.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-drawer.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-group.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-misc.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-panel.vue ================================================ ================================================ FILE: frontend/src/components/browser/drawer/browser-settings-saved.vue ================================================ ================================================ FILE: frontend/src/components/browser/empty.vue ================================================ ================================================ FILE: frontend/src/components/browser/filter-warning-snackbar.vue ================================================ ================================================ FILE: frontend/src/components/browser/main.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/breadcrumbs/breadcrumbs.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/breadcrumbs/browser-toolbar-breadcrumbs.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/browser-toolbar-title.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/nav/browser-nav-button.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/nav/browser-toolbar-nav.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/search/browser-toolbar-search.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/search/search-combobox.vue 
================================================ ================================================ FILE: frontend/src/components/browser/toolbars/search/search-help-text.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/search/search-help.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/select-many/browser-toolbar-select-many.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/browser-toolbar-top.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/filter-by-select.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/filter-sub-menu.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/order-by-select.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/order-reverse-button.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/search-button.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/toolbar-button.vue ================================================ ================================================ FILE: frontend/src/components/browser/toolbars/top/top-group-select.vue ================================================ ================================================ FILE: frontend/src/components/cancel-button.vue ================================================ ================================================ FILE: frontend/src/components/clipboard.vue ================================================ ================================================ FILE: frontend/src/components/close-button.vue ================================================ ================================================ FILE: frontend/src/components/codex-list-item.vue ================================================ ================================================ FILE: frontend/src/components/confirm-dialog.vue ================================================ ================================================ FILE: frontend/src/components/confirm-footer.vue ================================================ ================================================ FILE: frontend/src/components/download-button.vue ================================================ ================================================ FILE: frontend/src/components/empty.vue ================================================ ================================================ FILE: frontend/src/components/mark-read-button.vue ================================================ ================================================ FILE: frontend/src/components/metadata/expand-button.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-activator.vue 
================================================ ================================================ FILE: frontend/src/components/metadata/metadata-body.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-chip.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-controls.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-cover.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-dialog.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-header.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-ratings.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-tags.vue ================================================ ================================================ FILE: frontend/src/components/metadata/metadata-text.vue ================================================ ================================================ FILE: frontend/src/components/metadata/table.scss ================================================ td { border-bottom: none !important; } td.key { color: rgb(var(--v-theme-textSecondary)); width: 1%; white-space: nowrap; } ================================================ FILE: frontend/src/components/metadata/tags-table.vue ================================================ ================================================ FILE: frontend/src/components/pagination-nav-button.vue ================================================ ================================================ FILE: frontend/src/components/pagination-slider.vue ================================================ ================================================ FILE: frontend/src/components/pagination-toolbar.vue ================================================ ================================================ FILE: frontend/src/components/placeholder-loading.vue ================================================ ================================================ FILE: frontend/src/components/reader/book-change-activator.vue ================================================ ================================================ FILE: frontend/src/components/reader/book-change-drawer.vue ================================================ ================================================ FILE: frontend/src/components/reader/books-window.vue ================================================ ================================================ FILE: frontend/src/components/reader/change-column.scss ================================================ .changeColumn { position: fixed; top: 48px; height: calc(100vh - 96px); width: 33vw; } ================================================ FILE: frontend/src/components/reader/drawer/download-panel.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/keyboard-shortcuts-panel.vue ================================================ 
================================================ FILE: frontend/src/components/reader/drawer/keyboard-shortcuts-table.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/reader-settings-controls.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/reader-settings-drawer.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/reader-settings-panel.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/reader-settings-reader.vue ================================================ ================================================ FILE: frontend/src/components/reader/drawer/reader-settings-scope.vue ================================================ ================================================ FILE: frontend/src/components/reader/empty.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/horizontal-pages.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/page/page-error.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/page/page-img.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/page/page-loading.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/page/page.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/page-change-link.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/pager-full-pdf.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/pager-horizontal.vue ================================================ /* Inherits v-window styles from books-window */ ================================================ FILE: frontend/src/components/reader/pager/pager-vertical.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/pager.vue ================================================ /* Inherits v-window styles from books-window */ ================================================ FILE: frontend/src/components/reader/pager/pdf-doc.vue ================================================ ================================================ FILE: frontend/src/components/reader/pager/scale-for-scroll.vue ================================================ ================================================ FILE: frontend/src/components/reader/toolbars/nav/reader-book-change-nav-button.vue ================================================ ================================================ FILE: frontend/src/components/reader/toolbars/nav/reader-nav-button.vue ================================================ 
================================================ FILE: frontend/src/components/reader/toolbars/nav/reader-toolbar-nav.vue ================================================ ================================================ FILE: frontend/src/components/reader/toolbars/top/reader-arc-select.vue ================================================ ================================================ FILE: frontend/src/components/reader/toolbars/top/reader-toolbar-top.vue ================================================ ================================================ FILE: frontend/src/components/scale-button.vue ================================================ ================================================ FILE: frontend/src/components/settings/button.vue ================================================ ================================================ FILE: frontend/src/components/settings/docs-footer.vue ================================================ ================================================ FILE: frontend/src/components/settings/opds-dialog.vue ================================================ ================================================ FILE: frontend/src/components/settings/opds-url.vue ================================================ ================================================ FILE: frontend/src/components/settings/settings-drawer.vue ================================================ ================================================ FILE: frontend/src/components/settings/version-footer.vue ================================================ ================================================ FILE: frontend/src/components/submit-footer.vue ================================================ ================================================ FILE: frontend/src/components/toolbar-select.vue ================================================ ================================================ FILE: frontend/src/components/unauthorized.vue ================================================ ================================================ FILE: frontend/src/datetime.js ================================================ /* * Date & time formats * Date is forced to YYYY-MM-DD with sv-SE * Time is by default undefined and browser based but can be forced to sv-SE 24 HR. * XXX Force to 24 hr is probably superfluous at this point * const TWELVE_HOUR_LOCALE = "en-NZ"; */ const TWENTY_FOUR_HOUR_LOCALE = "sv-SE"; export const DATE_FORMAT = new Intl.DateTimeFormat(TWENTY_FOUR_HOUR_LOCALE); export const NUMBER_FORMAT = new Intl.NumberFormat(); export const DURATION_FORMAT = new Intl.DurationFormat("en", { style: "digital", daysDisplay: "auto", hoursDisplay: "auto", minutesDisplay: "auto", }); const MINUTE_SECONDS = 60; const HOUR_SECONDS = MINUTE_SECONDS * 60; const DAY_SECONDS = HOUR_SECONDS * 24; export const getTimeFormat = function (twentyFourHourTime) { const locale = twentyFourHourTime ? TWENTY_FOUR_HOUR_LOCALE : undefined; return new Intl.DateTimeFormat(locale, { timeStyle: "medium", }); }; export const getDateTime = function (dttm, twentyFourHourTime, br = false) { const date = new Date(dttm); const dttm_date = DATE_FORMAT.format(date); const timeFormat = getTimeFormat(twentyFourHourTime); const dttm_time = timeFormat.format(date); const divider = br ? "<br/>" : ", ";
return dttm_date + divider + dttm_time; }; export const getTimestamp = function () { return Math.floor(Date.now() / 1000); }; export const getFormattedDuration = (fromTime, toTime) => { const totalSeconds = Math.floor((toTime - fromTime) / 1000); const duration = { days: Math.floor(totalSeconds / DAY_SECONDS), hours: Math.floor((totalSeconds % DAY_SECONDS) / HOUR_SECONDS), minutes: Math.floor((totalSeconds % HOUR_SECONDS) / MINUTE_SECONDS), seconds: totalSeconds % MINUTE_SECONDS, }; return DURATION_FORMAT.format(duration); }; export default { DATE_FORMAT, NUMBER_FORMAT, getDateTime, getFormattedDuration, getTimeFormat, getTimestamp, }; ================================================ FILE: frontend/src/http-error.vue ================================================ ================================================ FILE: frontend/src/main.js ================================================ import "@mdi/font/css/materialdesignicons.css"; import "vuetify/styles"; // Global CSS has to be imported import { createHead, VueHeadMixin } from "@unhead/vue/client"; import { createApp } from "vue"; import dragScrollDirective from "@/plugins/drag-scroll"; import router from "@/plugins/router"; import vuetify from "@/plugins/vuetify"; import { setupStore } from "@/stores/store"; import App from "@/app.vue"; const app = createApp(App); app.config.performance = import.meta.env.PROD; app.use(vuetify); setupStore(app); app.use(router); app.use(createHead()); app.mixin(VueHeadMixin); app.directive("drag-scroller", dragScrollDirective); router .isReady() .then(() => { return app.mount("#App"); }) // Top level await would require a plugin .catch(console.error); export default app; ================================================ FILE: frontend/src/platform.js ================================================ // Identify platforms for special behaviors const _IS_MOBILE_RE = /iP(?:ad|hone|od)|Android/; // codespell:ignore od const _IS_MOBILE_UA = _IS_MOBILE_RE.test(navigator.userAgent); export const IS_MOBILE = _IS_MOBILE_UA || globalThis.orientation !== undefined; /* *export const IS_TOUCH = * "ontouchstart" in window || * navigator.maxTouchPoints > 0 || * navigator.msMaxTouchPoints > 0 || * window.matchMedia("(any-hover: none)").matches; */ ================================================ FILE: frontend/src/plugins/drag-scroll.js ================================================ // v-drag-scroll directive const DRAG_THRESHOLD = 4; // pixels before we commit to a drag const dragScrollDirective = { mounted(el, binding) { const onlyX = binding.modifiers.onlyX; const onlyY = binding.modifiers.onlyY; let startX = 0; let startY = 0; let scrollLeft = 0; let scrollTop = 0; let pointerId = null; let dragging = false; const onPointerDown = (e) => { // Don't intercept clicks on interactive children if (e.target.closest("button, a, input, [role=button]")) return; startX = e.clientX; startY = e.clientY; scrollLeft = el.scrollLeft; scrollTop = el.scrollTop; pointerId = e.pointerId; dragging = false; // not committed yet }; const onPointerMove = (e) => { if (pointerId === null) return; const dx = e.clientX - startX; const dy = e.clientY - startY; if (!dragging) { // Only commit to drag once threshold is exceeded if (Math.abs(dx) < DRAG_THRESHOLD && Math.abs(dy) < DRAG_THRESHOLD) return; dragging = true; el.setPointerCapture(pointerId); el.style.cursor = "grabbing"; } if (!onlyY) el.scrollLeft = scrollLeft - dx; if (!onlyX) el.scrollTop = scrollTop - dy; }; const onPointerUp = () => { if (dragging && pointerId !== 
null) { el.releasePointerCapture(pointerId); el.style.cursor = ""; } pointerId = null; dragging = false; }; el._dragScroll = { onPointerDown, onPointerMove, onPointerUp }; el.addEventListener("pointerdown", onPointerDown); el.addEventListener("pointermove", onPointerMove); el.addEventListener("pointerup", onPointerUp); el.addEventListener("pointercancel", onPointerUp); }, unmounted(el) { const { onPointerDown, onPointerMove, onPointerUp } = el._dragScroll; el.removeEventListener("pointerdown", onPointerDown); el.removeEventListener("pointermove", onPointerMove); el.removeEventListener("pointerup", onPointerUp); el.removeEventListener("pointercancel", onPointerUp); }, }; export default dragScrollDirective; ================================================ FILE: frontend/src/plugins/router.js ================================================ import { createRouter, createWebHistory } from "vue-router"; import { lastRoute } from "@/choices/browser-defaults.json"; const MainAdmin = () => import("@/admin.vue"); const MainBrowser = () => import("@/browser.vue"); const HttpError = () => import("@/http-error.vue"); const MainReader = () => import("@/reader.vue"); const AdminFlagsTab = () => import("@/components/admin/tabs/flag-tab.vue"); const AdminUsersTab = () => import("@/components/admin/tabs/user-tab.vue"); const AdminGroupsTab = () => import("@/components/admin/tabs/group-tab.vue"); const AdminLibrariesTab = () => import("@/components/admin/tabs/library-tab.vue"); const AdminJobsTab = () => import("@/components/admin/tabs/job-tab.vue"); const AdminStatsTab = () => import("@/components/admin/tabs/stats-tab.vue"); const LAST_ROUTE = { name: "browser", params: globalThis.CODEX.LAST_ROUTE || lastRoute, }; const routes = [ { name: "home", path: "/", redirect: LAST_ROUTE, }, { name: "reader", path: "/c/:pk/:page", component: MainReader, }, { name: "browser", path: "/:group/:pks/:page", component: MainBrowser, }, { name: "admin", path: "/admin", component: MainAdmin, redirect: "/admin/libraries", children: [ { name: "admin-users", path: "users", component: AdminUsersTab, }, { name: "admin-groups", path: "groups", component: AdminGroupsTab }, { name: "admin-libraries", path: "libraries", component: AdminLibrariesTab, }, { name: "admin-flags", path: "flags", component: AdminFlagsTab }, { name: "admin-jobs", path: "jobs", component: AdminJobsTab }, { name: "admin-stats", path: "stats", component: AdminStatsTab }, ], }, { name: "error", path: "/error/:code", component: HttpError, props: true }, { name: "404", path: "/:pathMatch(.*)*", redirect: "/error/404" }, ]; const router = new createRouter({ history: createWebHistory(globalThis.CODEX.APP_PATH), routes, }); router.afterEach((to) => { // Strip the ts cache-busting query param from the visible URL. // Vue Router's $route watcher already captured the value before this fires. 
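// Illustration (hypothetical values): a push to /c/5/1?ts=1712345678 keeps the router state intact but rewrites the address bar to /c/5/1.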
if (to.query?.ts !== undefined) { const { ts, ...query } = to.query; const cleanRoute = router.resolve({ name: to.name, params: to.params, query, hash: to.hash, }); history.replaceState(history.state, "", cleanRoute.href); } }); export default router; ================================================ FILE: frontend/src/plugins/vuetify.js ================================================ import { createVuetify } from "vuetify"; import { aliases, mdi } from "vuetify/iconsets/mdi-svg"; const WHITE = "#FFFFFF"; const DISABLED = "#808080"; const codexTheme = { dark: true, colors: { // -- built in --- primary: "#CC7B19", // codex orange // '#1976D2' - light blue "primary-darken-1": "#965B13", /* * secondary: "#03DAC5", // blue * "secondary-darken-1": "#02a191", * accent: "#FF4081", // pinkish */ error: "#DC143C", // crimson // info: "#2196F3", // lightblue (similar to primary) success: "#14dc3c", // crimsongreen // warning: "#FB8C00", // soft orange "surface-light": "#2A2A2A", // --- custom --- linkHover: WHITE, textPrimary: WHITE, textHeader: "#D3D3D3", textSecondary: "#A9A9A9", textDisabled: DISABLED, iconsInactive: DISABLED, includeGroup: "#151", excludeGroup: "#511", }, }; export default new createVuetify({ defaults: { global: { ripple: true, }, VCheckbox: { color: codexTheme.colors.primary, }, VCheckboxBtn: { color: codexTheme.colors.primary, }, VCombobox: { color: codexTheme.colors.primary, }, VProgressLinear: { color: codexTheme.colors.primary, }, VProgressCircular: { color: codexTheme.colors.primary, }, VRadioGroup: { color: codexTheme.colors.primary, }, VSelect: { color: codexTheme.colors.primary, }, VSlider: { color: codexTheme.colors.primary, }, VTabs: { color: codexTheme.colors.primary, }, VTextField: { color: codexTheme.colors.primary, }, }, theme: { defaultTheme: "codexTheme", options: { customProperties: true, }, themes: { codexTheme, }, }, icons: { defaultSet: "mdi", aliases, sets: { mdi, }, }, }); ================================================ FILE: frontend/src/reader.vue ================================================ ================================================ FILE: frontend/src/route.js ================================================ const REVERSE_READING_DIRECTIONS = Object.freeze(new Set(["rtl", "btt"])); export const getReaderRoute = ( { ids, page, readingDirection, pageCount }, importMetadata, ) => { // Get the route to a comic with the correct entry page.
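// Entry page below: an explicit page wins; otherwise reverse-direction books (rtl, btt) open on their last page (pageCount - 1), and everything else opens on page 0.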
if (ids.length === 0 || (importMetadata && !pageCount)) { return ""; } const pk = ids[0]; if (page) { page = Number(page); } else if (REVERSE_READING_DIRECTIONS.has(readingDirection)) { const maxPage = Number(pageCount) - 1; page = Math.max(maxPage, 0); } else { page = 0; } return { name: "reader", params: { pk, page }, }; }; export default { getReaderRoute, }; ================================================ FILE: frontend/src/stores/admin.js ================================================ import { defineStore } from "pinia"; import API from "@/api/v3/admin"; import { useAuthStore } from "@/stores/auth"; import { useCommonStore } from "@/stores/common"; const warnError = (error) => console.warn(error); const IRREGULAR_PLURALS = Object.freeze({ ActiveLibrarianStatus: "ActiveLibrarianStatuses", Library: "Libraries", }); export const TABS = Object.freeze([ "Users", "Groups", "Libraries", "Flags", "Jobs", "Stats", ]); const getTablePlural = (table) => { if (table in IRREGULAR_PLURALS) { return IRREGULAR_PLURALS[table]; } return table + "s"; }; export const useAdminStore = defineStore("admin", { state: () => ({ allLibrarianStatuses: {}, activeLibrarianStatuses: [], unseenFailedImports: false, users: [], groups: [], libraries: undefined, failedImports: [], flags: [], folderPicker: { root: undefined, folders: [], }, timestamps: {}, stats: undefined, activeTab: "Libraries", }), getters: { isUserAdmin() { const authStore = useAuthStore(); return authStore.isUserAdmin; }, normalLibraries() { const libs = []; if (this.libraries) { for (const library of this.libraries) { if (!library.coversOnly) { libs.push(library); } } } return libs; }, customCoverLibraries() { const libs = []; if (this.libraries) { for (const library of this.libraries) { if (library.coversOnly) { libs.push(library); } } } return libs; }, doNormalComicLibrariesExist() { return Object.keys(this.normalLibraries).length > 0; }, }, actions: { /** Guard: returns true and early-exits the caller if not admin. 
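* (It only returns the flag; each action bails out itself with: if (this._requireAdmin()) return false;)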
*/ _requireAdmin() { return !this.isUserAdmin; }, async loadTable(table) { if (this._requireAdmin()) return false; const pluralTable = getTablePlural(table); const apiFn = "get" + pluralTable; await API[apiFn]() .then((response) => { const stateField = pluralTable.charAt(0).toLowerCase() + pluralTable.slice(1); if (Array.isArray(response.data)) { this[stateField] = response.data; return true; } else { console.warn(stateField, "response not an array"); return false; } }) .catch(warnError); }, loadTables(tables) { if (this._requireAdmin()) return false; for (const table of tables) { this.loadTable(table); } }, async loadFolders(path, showHidden) { if (this._requireAdmin()) return false; await API.getFolders(path, showHidden) .then((response) => { this.folderPicker = response.data; return true; }) .catch(useCommonStore().setErrors); }, async clearFolders(root) { if (this._requireAdmin()) return false; this.folderPicker = { root, folders: [""] }; }, async createRow(table, data) { if (this._requireAdmin()) return false; const apiFn = "create" + table; const commonStore = useCommonStore(); await API[apiFn](data) .then(() => { commonStore.clearErrors(); return this.loadTable(table); }) .catch(commonStore.setErrors); }, async updateRow(table, pk, data) { if (this._requireAdmin()) return false; const apiFn = "update" + table; const commonStore = useCommonStore(); await API[apiFn](pk, data) .then(() => { commonStore.clearErrors(); return this.loadTable(table); }) .catch(commonStore.setErrors); }, async changeUserPassword(pk, data) { if (this._requireAdmin()) return false; const commonStore = useCommonStore(); await API.changeUserPassword(pk, data) .then((response) => { commonStore.setSuccess(response.data.detail); return true; }) .catch(commonStore.setErrors); }, async deleteRow(table, pk) { if (this._requireAdmin()) return false; const apiFn = "delete" + table; const commonStore = useCommonStore(); await API[apiFn](pk) .then(() => { commonStore.clearErrors(); return this.loadTable(table); }) .catch(commonStore.setErrors); }, async librarianTask(task, text, libraryId) { if (this._requireAdmin()) return false; const commonStore = useCommonStore(); await API.postLibrarianTask({ task, libraryId }) .then(() => commonStore.setSuccess(text)) .catch(commonStore.setErrors); }, nameSet(rows, nameKey, oldRow, dupeCheck) { if (this._requireAdmin()) return false; const names = new Set(); if (rows) { for (const obj of rows) { if (!dupeCheck || !oldRow || obj[nameKey] !== oldRow[nameKey]) { names.add(obj[nameKey]); } } } return names; }, async loadStats() { if (this._requireAdmin()) return false; await API.getStats() .then((response) => { this.stats = response.data; return true; }) .catch(console.warn); }, async loadAllStatuses() { if (this._requireAdmin()) return false; await API.getAllLibrarianStatuses() .then((response) => { if (Array.isArray(response.data)) { const map = {}; for (const status of response.data) { map[status.statusType] = status; } this.allLibrarianStatuses = map; } return true; }) .catch(console.warn); }, async updateAPIKey() { if (this._requireAdmin()) return false; await API.updateAPIKey() .then(() => { return true; }) .catch(console.warn); }, }, }); ================================================ FILE: frontend/src/stores/auth.js ================================================ import { defineStore } from "pinia"; import API from "@/api/v3/auth"; import { useCommonStore } from "@/stores/common"; /* * Don't use router in here, perhaps called too early. * Breaks the prod build.
*/ export const useAuthStore = defineStore("auth", { state: () => ({ adminFlags: { registration: undefined, nonUsers: undefined, bannerText: undefined, lazyImportMetadata: undefined, }, user: undefined, token: undefined, MIN_PASSWORD_LENGTH: 4, showLoginDialog: false, showChangePasswordDialog: false, showAuthTokenDialog: false, }), getters: { isAuthorized() { return Boolean(this.user || this.adminFlags.nonUsers); }, isAuthChecked() { return ( this.user !== undefined || this.adminFlags.registration !== undefined ); }, isUserAdmin() { return this.user && (this.user.isStaff || this.user.isSuperuser); }, isAuthDialogOpen() { return this.showLoginDialog || this.showChangePasswordDialog; }, isBanner(state) { return Boolean(state.adminFlags.bannerText); }, }, actions: { async loadAdminFlags() { await API.getAdminFlags() .then((response) => { this.adminFlags = response.data; return true; }) .catch(console.error); }, async loadProfile() { return API.getProfile() .then((response) => { this.user = response.data; return true; }) .catch(console.debug); }, async login(credentials, clear = true) { const commonStore = useCommonStore(); await API.login(credentials) .then(() => { if (clear) { commonStore.clearErrors(); } return this.loadProfile(); }) .catch(commonStore.setErrors); }, async register(credentials) { const commonStore = useCommonStore(); await API.register(credentials) .then(() => { commonStore.clearErrors(); return this.login(credentials); }) .catch(commonStore.setErrors); }, logout() { API.logout() .then(() => { this.user = undefined; return true; }) .catch(console.error); }, async changePassword(credentials) { const changedCredentials = { username: this.user.username, password: credentials.password, }; const commonStore = useCommonStore(); await API.updatePassword(credentials) .then((response) => { commonStore.setSuccess(response.data.detail); return this.login(changedCredentials, false); }) .catch(commonStore.setErrors); }, async setTimezone() { if (this.adminFlags.nonUsers || this.user) { await API.updateTimezone().catch(console.error); } }, async getToken() { await API.getToken() .then((response) => (this.token = response.data.token)) .catch(console.error); }, async updateToken() { await API.updateToken() .then((response) => (this.token = response.data.token)) .catch(console.error); }, }, }); ================================================ FILE: frontend/src/stores/browser-select-many.js ================================================ import { defineStore } from "pinia"; import API from "@/api/v3/browser"; import { getGroupDownloadURL } from "@/api/v3/browser"; import { getTimestamp } from "@/datetime"; import { useBrowserStore } from "@/stores/browser"; const _itemKey = (item) => { return `${item.group}:${item.ids.join(",")}`; }; const _groupSelectedItems = (selectedItems) => { // Group selected items by their group field. const grouped = {}; for (const item of selectedItems.values()) { if (!grouped[item.group]) { grouped[item.group] = []; } grouped[item.group].push(item); } return grouped; }; const _collectPks = (items) => { // Collect all unique pks from a list of items. 
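// The numeric (a, b) => a - b comparator below matters: the default sort compares as strings and would order 10 before 9.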
const pks = new Set(); for (const item of items) { for (const pk of item.ids) { pks.add(pk); } } return [...pks].sort((a, b) => a - b); }; export const useBrowserSelectManyStore = defineStore("browserSelectMany", { state: () => ({ active: false, selectedItems: new Map(), }), getters: { selectedCount(state) { return state.selectedItems.size; }, isSelected(state) { return (item) => state.selectedItems.has(_itemKey(item)); }, hasSelection(state) { return state.selectedItems.size > 0; }, /** * Build a composite item suitable for the metadata dialog. * Combines all selected items into one object with merged ids. */ compositeItem(state) { if (state.selectedItems.size === 0) { return null; } const grouped = _groupSelectedItems(state.selectedItems); const groups = Object.keys(grouped); // Use the first group type found. const group = groups[0]; const items = grouped[group]; const pks = _collectPks(items); const names = items.map((item) => item.name).filter(Boolean); const childCount = items.reduce( (sum, item) => sum + (item.childCount || 1), 0, ); return { group, ids: pks, pks, name: names.length > 1 ? `${names.length} items` : names[0] || "", childCount, finished: null, }; }, }, actions: { deactivate() { this.active = false; this.selectedItems = new Map(); }, toggleItem(item) { if (!this.active) { this.active = true; } const key = _itemKey(item); if (this.selectedItems.has(key)) { this.selectedItems.delete(key); } else { this.selectedItems.set(key, item); } // Trigger reactivity by replacing the map. this.selectedItems = new Map(this.selectedItems); // Deactivate if nothing selected. if (this.selectedItems.size === 0) { this.active = false; } }, selectAll() { this.active = true; const browserStore = useBrowserStore(); const cards = [ ...(browserStore.page.groups ?? []), ...(browserStore.page.books ?? []), ]; for (const item of cards) { const key = _itemKey(item); this.selectedItems.set(key, item); } this.selectedItems = new Map(this.selectedItems); }, clearSelection() { this.selectedItems = new Map(); this.active = false; }, async markFinished(finished) { const browserStore = useBrowserStore(); if (!browserStore.isAuthorized || this.selectedItems.size === 0) { return; } const grouped = _groupSelectedItems(this.selectedItems); const promises = []; for (const [group, items] of Object.entries(grouped)) { const pks = _collectPks(items); const params = { group, ids: pks }; promises.push( API.updateGroupBookmarks(params, browserStore.filterOnlySettings, { finished, }), ); } await Promise.all(promises); browserStore.loadBrowserPage(getTimestamp()); }, download() { const browserStore = useBrowserStore(); if (this.selectedItems.size === 0) { return; } const grouped = _groupSelectedItems(this.selectedItems); for (const [group, items] of Object.entries(grouped)) { const pks = _collectPks(items); const groupName = browserStore.groupNames[group] || "Items"; const plural = groupName.endsWith("s") ? 
groupName : groupName + "s"; const fn = `Selected ${plural}.zip`; const settings = browserStore.filterOnlySettings; const url = getGroupDownloadURL({ group, pks }, fn, settings, 0); const link = document.createElement("a"); link.download = fn; link.href = url; link.click(); link.remove(); } }, }, }); ================================================ FILE: frontend/src/stores/browser.js ================================================ import { dequal } from "dequal"; import { defineStore } from "pinia"; import API from "@/api/v3/browser"; import COMMON_API from "@/api/v3/common"; import BROWSER_CHOICES from "@/choices/browser-choices.json"; import BROWSER_DEFAULTS from "@/choices/browser-defaults.json"; import { IDENTIFIER_SOURCES, TOP_GROUP } from "@/choices/browser-map.json"; import { READING_DIRECTION } from "@/choices/reader-map.json"; import { getTimestamp } from "@/datetime"; import router from "@/plugins/router"; import { useAuthStore } from "@/stores/auth"; const GROUPS = Object.freeze("rpisvc"); export const GROUPS_REVERSED = Object.freeze([...GROUPS].reverse().join("")); const HTTP_REDIRECT_CODES = Object.freeze(new Set([301, 302, 303, 307, 308])); const DEFAULT_BOOKMARK_VALUES = Object.freeze( new Set([undefined, null, BROWSER_DEFAULTS.bookmarkFilter]), ); const ALWAYS_ENABLED_TOP_GROUPS = Object.freeze(new Set(["a", "c"])); const NO_REDIRECT_ON_SEARCH_GROUPS = Object.freeze(new Set(["a", "c", "f"])); const NON_BROWSE_GROUPS = Object.freeze(new Set(["a", "f"])); const SEARCH_HIDE_TIMEOUT = 5000; const COVER_KEYS = Object.freeze(["customCovers", "dynamicCovers", "show"]); const DYNAMIC_COVER_KEYS = Object.freeze([ "filters", "orderBy", "orderReverse", "q", ]); const FILTER_ONLY_KEYS = Object.freeze(["filters", "q"]); const METADATA_LOAD_KEYS = Object.freeze(["filters", "q", "mtime"]); const redirectRoute = (route) => { if (route && route.params) { router.push(route).catch(console.warn); } }; const notEmptyOrBool = (value) => { return (value && Object.keys(value).length > 0) || typeof value === "boolean"; }; export const useBrowserStore = defineStore("browser", { state: () => ({ choices: { static: Object.freeze({ bookmark: BROWSER_CHOICES.BOOKMARK_FILTER, groupNames: TOP_GROUP, settingsGroup: BROWSER_CHOICES.SETTINGS_GROUP, readingDirection: READING_DIRECTION, identifierSources: IDENTIFIER_SOURCES, }), dynamic: undefined, }, settings: { breadcrumbs: [], customCovers: BROWSER_DEFAULTS.customCovers, dynamicCovers: BROWSER_DEFAULTS.dynamicCovers, filters: BROWSER_DEFAULTS.filters, orderBy: BROWSER_DEFAULTS.orderBy, orderReverse: BROWSER_DEFAULTS.orderReverse, search: BROWSER_DEFAULTS.search, show: BROWSER_DEFAULTS.show, topGroup: BROWSER_DEFAULTS.topGroup, twentyFourHourTime: BROWSER_DEFAULTS.twentyFourHourTime, }, page: { adminFlags: { // determined by api folderView: undefined, importMetadata: undefined, }, title: { groupName: undefined, groupCount: undefined, }, librariesExist: undefined, modelGroup: undefined, numPages: 1, groups: [], books: [], fts: undefined, searchError: undefined, mtime: 0, }, // LOCAL UI filterMode: "base", zeroPad: 0, browserPageLoaded: false, isSearchOpen: false, isSearchHelpOpen: false, searchHideTimeout: undefined, // Saved settings savedSettingsList: [], savedSettingsSnackbar: [], }), getters: { groupNames() { const groupNames = {}; for (const [key, pluralName] of Object.entries(TOP_GROUP)) { groupNames[key] = pluralName === "Series" ? 
pluralName : pluralName.slice(0, -1); } return groupNames; }, topGroupChoices() { const choices = []; for (const item of BROWSER_CHOICES.TOP_GROUP) { if (this._isRootGroupEnabled(item.value)) { choices.push(item); } } return choices; }, topGroupChoicesMaxLen() { return this._maxLenChoices(BROWSER_CHOICES.TOP_GROUP); }, orderByChoices(state) { const choices = []; for (const item of BROWSER_CHOICES.ORDER_BY) { if ( (item.value === "path" && !state.page.adminFlags.folderView) || (item.value === "child_count" && state.page.modelGroup === "c") || (item.value === "search_score" && (!state.settings.search || !state.page.fts)) ) { // denied order_by condition continue; } else { choices.push(item); } } return choices; }, orderByChoicesMaxLen() { return this._maxLenChoices(BROWSER_CHOICES.ORDER_BY); }, filterByChoicesMaxLen() { return this._maxLenChoices(BROWSER_CHOICES.BOOKMARK_FILTER); }, isAuthorized() { return useAuthStore().isAuthorized; }, isDynamicFiltersSelected(state) { for (const [name, array] of Object.entries(state.settings.filters)) { if (name !== "bookmark" && array && array.length > 0) { return true; } } return false; }, isFiltersClearable(state) { const isDefaultBookmarkValueSelected = DEFAULT_BOOKMARK_VALUES.has( state.settings.filters.bookmark, ); return !isDefaultBookmarkValueSelected || this.isDynamicFiltersSelected; }, lowestShownGroup(state) { let lowestGroup = "r"; const topGroupIndex = GROUPS_REVERSED.indexOf(state.settings.topGroup); for (const [index, group] of [...GROUPS_REVERSED].entries()) { const show = state.settings.show[group]; if (show) { if (index <= topGroupIndex) { lowestGroup = group; } break; } } return lowestGroup; }, isSearchMode(state) { return Boolean(state.settings.search); }, lastRoute(state) { const params = state.settings?.breadcrumbs?.at(-1) || globalThis.CODEX.LAST_ROUTE; const route = {}; if (params) { route.name = "browser"; delete params.name; route.params = params; } else { route.name = "home"; } return route; }, coverSettings(state) { const params = router.currentRoute.value.params; const group = params.group; if (group == "c") { return {}; } let keys = COVER_KEYS; const dc = state.settings.dynamicCovers; if (dc) { keys = [...keys, ...DYNAMIC_COVER_KEYS]; } const settings = this._filterSettings(state, keys); const pks = params.pks; if (!dc && group !== "r" && pks) { settings["parentRoute"] = { group, pks, }; } return settings; }, filterOnlySettings(state) { return this._filterSettings(state, FILTER_ONLY_KEYS); }, metadataSettings(state) { return this._filterSettings(state, METADATA_LOAD_KEYS); }, routeKey() { const params = router.currentRoute.value.params; return `${params.group}:${params.pk}:${params.page}`; }, }, actions: { /* * UTILITY */ _filterSettings(state, keys) { return Object.fromEntries( Object.entries(state.settings).filter(([k, v]) => { if (!keys.includes(k)) { return null; } if (k === "filters") { const usedFilters = {}; for (const [subkey, subvalue] of Object.entries(v)) { if (notEmptyOrBool(subvalue)) { usedFilters[subkey] = subvalue; } } v = usedFilters; } if (notEmptyOrBool(v)) { return [k, v]; } }), ); }, _maxLenChoices(choices) { let maxLen = 0; for (const item of choices) { if (item && item.title && item.title.length > maxLen) { maxLen = item.title.length; } } return maxLen; }, identifierSourceTitle(idSource) { if (!idSource) { return idSource; } const lowerIdSource = idSource.toLowerCase(); const longName = this.choices.static.identifierSources[lowerIdSource]; return longName || idSource; }, fixUniverseTitles(universes) { 
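// Returns copies of the universe tags with any designation appended to the display name as "Name (designation)".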
const items = []; for (const oldItem of universes) { const item = { ...oldItem }; if (item.designation) { item.name += ` (${item.designation})`; } items.push(item); } return items; }, setIsSearchOpen(value) { this.isSearchOpen = value; }, /* * VALIDATORS */ _isRootGroupEnabled(topGroup) { if (ALWAYS_ENABLED_TOP_GROUPS.has(topGroup)) { return true; } else if (topGroup == "f") { return this.page.adminFlags?.folderView; } else { return this.settings.show[topGroup]; } }, _validateSearch(data) { if (!this.settings.search && !data.search) { // if cleared search check for bad order_by if (this.settings.orderBy === "search_score") { data.orderBy = this.settings.topGroup === "f" ? "filename" : "sort_name"; } return; } else if (this.settings.search) { // Do not redirect to first search if already in search mode. return; } // If first search redirect to lowest group and change order data.orderBy = "search_score"; data.orderReverse = true; const group = router.currentRoute.value.params?.group; if ( NO_REDIRECT_ON_SEARCH_GROUPS.has(group) || group === this.lowestShownGroup ) { return; } return { params: { group: this.lowestShownGroup, pks: "0", page: "1" } }; }, _validateTopGroup(data, redirect) { /* * If the top group changed supergroups or we're at the root group and the new * top group is above the proper nav group */ const currentParams = router?.currentRoute?.value?.params; const currentGroup = currentParams?.group; const newTopGroup = data.topGroup; if (currentGroup === "r" && !NON_BROWSE_GROUPS.has(data.topGroup)) { return redirect; // r group can have any top groups? } const oldTopGroup = this.settings.topGroup; if ( oldTopGroup === newTopGroup || !newTopGroup || (!oldTopGroup && newTopGroup) || newTopGroup === currentGroup ) { /* * First url, initializing settings. * or * topGroup didn't change. * or * topGroup and group are the same, request is well formed. */ return redirect; } const oldTopGroupIndex = GROUPS_REVERSED.indexOf(oldTopGroup); const newTopGroupIndex = GROUPS_REVERSED.indexOf(newTopGroup); const newTopGroupIsBrowse = newTopGroupIndex !== -1; const oldAndNewBothBrowseGroups = newTopGroupIsBrowse && oldTopGroupIndex !== -1; // Construct and return new redirect let params; if (oldAndNewBothBrowseGroups) { if (oldTopGroupIndex < newTopGroupIndex) { /* * new top group is a parent (REVERSED) * Signal that we need new breadcrumbs. we do that by redirecting in place? */ params = currentParams; /* * Make a numeric page so won't trigger the redirect remover and will always * redirect so we repopulate breadcrumbs */ params.page = +params.page; } else { /* * New top group is a child (REVERSED) * Redirect to the new root. */ params = { group: "r", pks: "0", page: "1" }; } } else { // redirect to the new TopGroup const group = newTopGroupIsBrowse ? "r" : newTopGroup; params = { group, pks: "0", page: "1" }; } return { params }; }, getTopGroup(group) { // Similar to browser store logic. let topGroup; if (this.settings.topGroup === group || NON_BROWSE_GROUPS.has(group)) { topGroup = group; } else { const groupIndex = GROUPS_REVERSED.indexOf(group); // + 1; // Determine browse top group for (const testGroup of GROUPS_REVERSED.slice(groupIndex)) { if (testGroup !== "r" && this.settings.show[testGroup]) { topGroup = testGroup; break; } } } return topGroup; }, /* * MUTATIONS */ _addSettings(data) { this.$patch((state) => { for (let [key, value] of Object.entries(data)) { const newValue = typeof state.settings[key] === "object" && !Array.isArray(state.settings[key]) ? 
{ ...state.settings[key], ...value } : value; if (!dequal(state.settings[key], newValue)) { state.settings[key] = newValue; } } if (state.settings.search && !state.isSearchOpen) { state.isSearchOpen = true; } }); this.startSearchHideTimeout(); }, _validateAndSaveSettings(data) { let redirect = this._validateSearch(data); redirect = this._validateTopGroup(data, redirect); if (dequal(redirect?.params, router.currentRoute.value.params)) { // not triggered if page is numeric, which is intended. redirect = undefined; } // Add settings if (data) { this._addSettings(data); } this.filterMode = "base"; return redirect; }, async setSettings(data) { // Save settings to state and re-get the objects. const redirect = this._validateAndSaveSettings(data); this.browserPageLoaded = true; if (redirect) { redirectRoute(redirect); } else { this.loadBrowserPage(undefined, true); } }, async clearOneFilter(filterName) { this.$patch((state) => { state.filterMode = "base"; state.settings.filters[filterName] = []; state.browserPageLoaded = true; }); await this.loadBrowserPage(undefined, true); }, async clearFilters(clearAll = false) { await API.resetSettings() .then((response) => { const data = response.data; this.$patch((state) => { state.settings.filters = data.filters; state.filterMode = "base"; if (clearAll) { state.settings.search = data.search; state.settings.orderBy = data.orderBy; state.settings.orderReverse = data.orderReverse; } state.browserPageLoaded = true; }); }) .catch(console.error); await this.loadBrowserPage(undefined, true); }, async setBookmarkFinished(params, finished) { if (!this.isAuthorized) { return; } await API.updateGroupBookmarks(params, this.filterOnlySettings, { finished, }).then(() => { this.loadBrowserPage(getTimestamp()); return true; }); }, clearSearchHideTimeout() { clearTimeout(this.searchHideTimeout); }, startSearchHideTimeout() { if (!this.isSearchOpen) { return; } const search = this.settings.search; if (search || this.isSearchHelpOpen) { this.clearSearchHideTimeout(); } else { this.searchHideTimeout = setTimeout(() => { const search = this.settings.search; if (!search) { this.setIsSearchOpen(false); } }, SEARCH_HIDE_TIMEOUT); } }, setSearchHelpOpen(value) { this.isSearchHelpOpen = value; this.startSearchHideTimeout(); }, /* * ROUTE */ routeToPage(page) { const route = structuredClone(router.currentRoute.value); route.params.page = page; router.push(route).catch(console.warn); }, handlePageError(error) { if (HTTP_REDIRECT_CODES.has(error?.response?.status)) { console.debug(error); const data = error.response.data; if (data.settings) { this.setSettings(data.settings); // Prevent settings reload in loadBrowserPage() erasing the set. this.browserPageLoaded = true; } if (data.route) { redirectRoute(data.route); } } else { return console.error(error); } }, /* * LOAD */ async loadSettings() { if (!this.isAuthorized) { return; } this.$patch((state) => { state.browserPageLoaded = false; state.choices.dynamic = undefined; }); const group = router?.currentRoute?.value.params?.group; await API.getSettings({ group }) .then((response) => { const data = response.data; const redirect = this._validateAndSaveSettings(data); this.browserPageLoaded = true; if (redirect) { return redirectRoute(redirect); } return this.loadBrowserPage(undefined); }) .catch((error) => { this.browserPageLoaded = true; return this.handlePageError(error); }); }, async loadBrowserPage(mtime, updateSettings = false) { // Get objects for the current route and settings. 
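// mtime fallback order below: the explicit argument, then the route's ?ts= query param, then the last loaded page's mtime.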
if (!this.isAuthorized) { return; } const route = router.currentRoute.value; if (!mtime) { mtime = route.query.ts; if (!mtime) { mtime = this.page.mtime; } } if (this.browserPageLoaded) { this.browserPageLoaded = false; } else { return this.loadSettings(); } await API.getBrowserPage(route.params, this.settings, mtime) .then((response) => { const { breadcrumbs, ...page } = response.data; this.$patch((state) => { state.settings.breadcrumbs = Object.freeze(breadcrumbs); state.page = Object.freeze(page); if ( (state.settings.orderBy === "search_score" && !page.fts) || (state.settings.orderBy === "child_count" && page.modelGroup === "c") ) { state.settings.orderBy = "sort_name"; } state.choices.dynamic = undefined; state.browserPageLoaded = true; }); return true; }) .catch(this.handlePageError); if (updateSettings) { API.updateSettings(this.settings); } }, async loadAvailableFilterChoices() { return await API.getAvailableFilterChoices( router.currentRoute.value.params, this.filterOnlySettings, this.page.mtime, ) .then((response) => { this.choices.dynamic = response.data; return true; }) .catch(console.error); }, async loadFilterChoices(fieldName) { return await API.getFilterChoices( router.currentRoute.value.params, fieldName, this.filterOnlySettings, this.page.mtime, ) .then((response) => { this.choices.dynamic[fieldName] = Object.freeze( response.data.choices, ); return true; }) .catch(console.error); }, async loadMtimes() { const params = router?.currentRoute?.value?.params; const routeGroup = params?.group; const group = routeGroup && routeGroup != "r" ? routeGroup : this.page.modelGroup; const pks = params?.pks || "0"; const arcs = [{ group, pks }]; return await COMMON_API.getMtime(arcs, this.filterOnlySettings) .then((response) => { const newMtime = response?.data?.maxMtime; if (newMtime !== this.page.mtime) { this.choices.dynamic = undefined; this.loadBrowserPage(newMtime); } return true; }) .catch(console.error); }, routeWithSettings(settings, route) { if (!route) { return; } this._validateAndSaveSettings(settings); // ignore redirect router.push(route).catch(console.error); }, /* * SAVED SETTINGS */ async loadSavedSettingsList() { if (!this.isAuthorized) { return; } await API.getSavedSettingsList() .then((response) => { this.savedSettingsList = Object.freeze( response.data.savedSettings || [], ); return true; }) .catch(console.error); }, async saveCurrentSettings(name) { if (!this.isAuthorized) { return; } await API.saveSettings(name) .then(() => { this.loadSavedSettingsList(); return true; }) .catch(console.error); }, async loadSavedSettings(pk) { if (!this.isAuthorized) { return; } await API.loadSavedSettings(pk) .then((response) => { const { settings, filterWarnings } = response.data; if (settings) { this._validateAndSaveSettings(settings); this.browserPageLoaded = true; this.loadBrowserPage(undefined, true); } if (filterWarnings && filterWarnings.length > 0) { this.savedSettingsSnackbar = filterWarnings; } return true; }) .catch(console.error); }, async deleteSavedSettings(pk) { if (!this.isAuthorized) { return; } await API.deleteSavedSettings(pk) .then(() => { this.loadSavedSettingsList(); return true; }) .catch(console.error); }, clearSavedSettingsSnackbar() { this.savedSettingsSnackbar = []; }, }, }); ================================================ FILE: frontend/src/stores/common.js ================================================ // Common store functions import { defineStore } from "pinia"; import API from "@/api/v3/common"; const ERROR_KEYS = Object.freeze([ "detail", 
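// Probed in order by getErrors() below; the first key present in an error response supplies the message(s).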
"oldPassword", "password", "username", "passwordConfirm", "path", ]); const getErrors = (xiorError) => { let errors = []; if (xiorError && xiorError.response && xiorError.response.data) { let data = xiorError.response.data; for (const key of ERROR_KEYS) { if (key in data) { data = data[key]; break; } } errors = Array.isArray(data) ? data.flat() : [data]; } else { console.warn("Unable to parse error", xiorError); } if (errors.length === 0) { errors = ["Unknown error"]; } return errors; }; export const useCommonStore = defineStore("common", { state: () => ({ form: { errors: [], success: "", }, versions: { // This is injected by vite define installed: CODEX_PACKAGE_VERSION, latest: undefined, }, timestamp: Date.now(), isSettingsDrawerOpen: false, opdsURLs: undefined, }), actions: { async loadVersions() { await API.getVersions(this.timestamp) .then((response) => { const data = response.data; this.versions = data; return this.versions; }) .catch(console.error); }, setErrors(xiorError) { const errors = getErrors(xiorError); this.$patch((state) => { state.form.errors = errors; state.form.success = ""; }); }, setSuccess(success) { this.$patch((state) => { state.form.errors = []; state.form.success = success; }); }, clearErrors() { this.$patch((state) => { state.form.errors = []; state.form.success = ""; }); }, setTimestamp() { this.timestamp = Date.now(); }, setSettingsDrawerOpen(value) { this.isSettingsDrawerOpen = value; }, async loadOPDSURLs() { if (this.opdsURLs) { return; } await API.getOPDSURLs() .then((response) => { this.opdsURLs = Object.freeze({ ...response.data }); return this.opdsURLs; }) .catch(console.error); }, }, }); ================================================ FILE: frontend/src/stores/metadata.js ================================================ import { defineStore } from "pinia"; import { capitalCase } from "text-case"; import API from "@/api/v3/browser"; import { useBrowserStore } from "@/stores/browser"; const HEAD_ROLES = Object.freeze([ // writer "writer", "author", "plotter", "plot", "script", "scripter", "story", "interviewer", "translator", // art "artist", // pencil "penciller", "breakdowns", "pencils", "illustrator", "layouts", // ink "inker", "finishes", "inks", "embellisher", "inkAssists", // color "colorist", "colorer", "colourer", "colors", "colours", "colorDesigner", "colorFlats", "colorSeparations", "designer", "digitalArtTechnician", "grayTone", // letters "letterer", // cover "cover", "covers", "coverArtist", // producers "editor", "edits", "editing", ]); const TAGS = Object.freeze([ "genres", "characters", // identifiers "teams", "locations", "seriesGroups", "stories", "storyArcNumbers", "tags", "universes", ]); const MAIN_TAGS = Object.freeze(new Set(["Characters", "Teams"])); function compareByLastName(a, b) { const aLast = a.name.split(" ").pop(); const bLast = b.name.split(" ").pop(); return aLast.localeCompare(bLast); } export const useMetadataStore = defineStore("metadata", { state: () => ({ md: undefined, }), getters: { _mappedCredits(state) { const credits = {}; if (!state?.md?.credits) { return credits; } // Convert credits into a role based map for (const { role, person } of state.md.credits) { const roleName = role?.name ? 
role.name : "Other"; if (!(roleName in credits)) { credits[roleName] = []; } credits[roleName].push(person); } // Sort persons by last name for (const [roleName, persons] of Object.entries(credits)) { credits[roleName] = persons.sort(compareByLastName); } return credits; }, _sortedRoles(state) { // Sort the roles by special known order and then alphabetically. const roles = Object.keys(state._mappedCredits); const lowercaseRoleMap = {}; for (const originalRole of roles) { lowercaseRoleMap[originalRole.toLowerCase()] = originalRole; } const sortedRoles = new Set(); for (const role of HEAD_ROLES) { const originalRole = lowercaseRoleMap[role]; if (!originalRole) { continue; } sortedRoles.add(originalRole); delete lowercaseRoleMap[role]; if (!Object.keys(lowercaseRoleMap).length) { break; } } const sortedRolesList = [...sortedRoles]; sortedRolesList.sort(); return sortedRoles; }, credits(state) { return this.mapTag(state._mappedCredits, state._sortedRoles, "credits"); }, identifiers(state) { const identifiers = []; if (!state.md?.identifiers) { return identifiers; } for (const identifier of state.md.identifiers) { const parts = identifier.name.split(":"); const idType = parts[0]; const code = parts[1]; const finalTitle = useBrowserStore().identifierSourceTitle(idType); let name = ""; if (finalTitle && finalTitle !== "None") { name += finalTitle + ":"; } name += code; const item = { pk: identifier.pk, url: identifier.url, name, }; identifiers.push(item); } return identifiers; }, tags(state) { const tags = state.mapTag(state.md, TAGS); if (state.identifiers?.length) { tags["Identifiers"] = { filter: "identifiers", tags: this.identifiers, }; } for (const tagObj of Object.values(tags)) { tagObj.tags = tagObj.tags.sort((a, b) => a.name.localeCompare(b.name)); } return tags; }, }, actions: { async loadMetadata({ group, pks }) { await API.getMetadata({ group, pks }, useBrowserStore().metadataSettings) .then((response) => { const md = { ...response.data }; md.loaded = true; this.md = md; return true; }) .catch((error) => { console.error(error); this.clearMetadata(); }); }, clearMetadata() { this.md = undefined; }, getTagName(key) { var tagName; if (key === "storyArcNumbers") { tagName = "Story Arcs"; } else { tagName = capitalCase(key); } return tagName; }, /* *labelUniverses(tags) { * for (const tag of tags) { * tag.name += ` (${tag.designation})`; * } *}, */ markTagMain(tagName, tags) { const attr = "main" + tagName.slice(0, -1); const mainPk = this.md[attr]?.pk; const regularTags = []; var mainTags = []; for (const tag of tags) { if (mainPk === tag.pk) { mainTags.push(tag); } else { regularTags.push(tag); } } return { mainTags, regularTags }; }, mapTag(tagSource, keys, filter = undefined) { const tagMap = {}; for (const key of keys) { const tags = tagSource[key]; if (!tags?.length) { continue; } const tagName = this.getTagName(key); var mainTags = []; var regularTags = []; /* *if (tagName === "Universes") { * this.labelUniverses(tags); * regularTags = tags; *} else */ if (MAIN_TAGS.has(tagName)) { ({ mainTags, regularTags } = this.markTagMain(tagName, tags)); } else { regularTags = tags; } filter = filter ? 
filter : key; tagMap[tagName] = { filter, tags: regularTags, mainTags }; } return tagMap; }, lazyImport({ group, ids }) { return API.getLazyImport({ group, pks: ids }); }, }, }); ================================================ FILE: frontend/src/stores/reader.js ================================================ import { mdiBookArrowDown, mdiBookArrowUp } from "@mdi/js"; import { defineStore } from "pinia"; import { capitalCase } from "text-case"; import BROWSER_API from "@/api/v3/browser"; import COMMON_API from "@/api/v3/common"; import READER_API, { getComicPageSource } from "@/api/v3/reader"; import BROWSER_DEFAULTS from "@/choices/browser-defaults.json"; import READER_CHOICES from "@/choices/reader-choices.json"; import READER_DEFAULTS from "@/choices/reader-defaults.json"; import { getFullComicName } from "@/comic-name"; import router from "@/plugins/router"; import { useBrowserStore } from "@/stores/browser"; const SETTINGS_NULL_VALUES = Object.freeze(new Set(["", null, undefined])); const DIRECTION_REVERSE_MAP = Object.freeze({ prev: "next", next: "prev", }); const PREFETCH_LINK = Object.freeze({ rel: "prefetch", as: "image" }); export const VERTICAL_READING_DIRECTIONS = Object.freeze( new Set(["ttb", "btt"]), ); export const REVERSE_READING_DIRECTIONS = Object.freeze( new Set(["rtl", "btt"]), ); export const SCALE_DEFAULT = 1; const FIT_TO_CLASSES = Object.freeze({ S: "Screen", W: "Width", H: "Height", O: "Original", }); const BOOKS_NULL = Object.freeze({ current: undefined, prev: false, next: false, }); const ROUTES_NULL = Object.freeze({ prev: false, next: false, books: { prev: false, next: false, }, close: undefined, }); const DEFAULT_ARC = Object.freeze({ group: "s", ids: [], }); const ensureNoTwoPageVertical = (settings) => { // No two pages with vertical if ( VERTICAL_READING_DIRECTIONS.has(settings.readingDirection) && settings.twoPages ) { settings.twoPages = false; } }; export const useReaderStore = defineStore("reader", { state: () => ({ // static choices: { fitTo: READER_CHOICES.FIT_TO, readingDirection: READER_CHOICES.READING_DIRECTION, nullValues: SETTINGS_NULL_VALUES, }, // server globalSettings: { fitTo: READER_DEFAULTS.fitTo, twoPages: READER_DEFAULTS.twoPages, readingDirection: READER_DEFAULTS.readingDirection, readRtlInReverse: READER_DEFAULTS.readRtlInReverse, finishOnLastPage: READER_DEFAULTS.finishOnLastPage, pageTransition: READER_DEFAULTS.page_transition, cacheBook: READER_DEFAULTS.cache_book, }, intermediateSettings: {}, intermediateInfo: null, books: structuredClone(BOOKS_NULL), arcs: {}, arc: { group: "s", ids: [] }, mtime: 0, // local reader empty: false, page: undefined, routes: structuredClone(ROUTES_NULL), bookChange: undefined, reactWithScroll: false, clientSettings: { scale: SCALE_DEFAULT, }, showToolbars: false, settingsLoaded: false, bookSettings: {}, }), getters: { activeSettings(state) { // the empty settings guarantee here is for vitest. 
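// Descriptive note (added): getBookSettings returns {} when there is no // current book, which is the state component unit tests mount in before any // reader data loads.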
return this.getBookSettings(state.books.current) || {}; }, activeTitle(state) { const book = state.books.current; let title; if (book) { if (state.arc?.group != "f") { title = getFullComicName(book); } if (!title) { title = book.filename || ""; } } else { title = ""; } return title; }, isVertical(state) { return state.activeSettings.isVertical; }, isReadInReverse(state) { return state.activeSettings.isReadInReverse; }, routeParams(state) { return { pk: +state.books.current.pk, page: +state.page }; }, isPDF(state) { return state.books?.current?.fileType == "PDF"; }, cacheBook() { return ( this.activeSettings.cacheBook && !(this.isPDF && this.activeSettings.isVertical) ); }, isPagesNotRoutes(state) { return state.activeSettings.isVertical || this.cacheBook; }, isBTT(state) { return state.activeSettings.readingDirection === "btt"; }, isFirstPage(state) { return state.page === 0; }, isLastPage(state) { const maxPage = state.books.current ? state.books.current.maxPage : 0; const adj = state.activeSettings.twoPages ? 1 : 0; const limit = maxPage - adj; return this.page >= limit; }, closeBookRoute(state) { const route = { name: "browser" }; if (state.routes.close) { route.params = state.routes.close; } else { const breadcrumbs = useBrowserStore()?.settings?.breadcrumbs; route.params = breadcrumbs?.findLast((b) => b.group !== "c"); } if (route.params) { const cardPk = state.books?.current?.pk; if (cardPk) { route.hash = `#card-${cardPk}`; } } else { route.params = globalThis.CODEX.LAST_ROUTE || BROWSER_DEFAULTS.breadcrumbs[0]; } return route; }, }, actions: { /* * GETTER Algorithms */ setReadRTLInReverse(bookSettings) { // Special setting for RTL books return this.globalSettings.readRtlInReverse && bookSettings.readingDirection === "rtl" ? { ...bookSettings, readingDirection: "ltr" } : bookSettings; }, getBookSettings(book) { if (!book) { return {}; } if (!(book.pk in this.bookSettings)) { // Mask the book settings over intermediate over global settings. const resultSettings = structuredClone(SETTINGS_NULL_VALUES); let bookSettings = book?.settings || {}; bookSettings = this.setReadRTLInReverse(bookSettings); const allSettings = [ this.globalSettings, this.intermediateSettings, bookSettings, ]; for (const settings of allSettings) { for (const [key, val] of Object.entries(settings)) { if (!SETTINGS_NULL_VALUES.has(val)) { resultSettings[key] = val; } } } ensureNoTwoPageVertical(resultSettings); resultSettings.isVertical = VERTICAL_READING_DIRECTIONS.has( resultSettings.readingDirection, ); resultSettings.isReadInReverse = REVERSE_READING_DIRECTIONS.has( resultSettings.readingDirection, ); resultSettings.fitToClass = this.fitToClass(resultSettings); this.bookSettings[book.pk] = resultSettings; } return this.bookSettings[book.pk]; }, bookChangeLocation(direction) { let location; if (this.isBTT) { location = direction === "next" ? "left" : "right"; } else { location = direction === "next" ? "right" : "left"; } return location; }, bookChangeCursorClass(direction) { let cursor; if (this.activeSettings.isReadInReverse) { cursor = direction === "next" ? "up" : "down"; } else { cursor = direction === "next" ? "down" : "up"; } return cursor + "Cursor"; }, bookChangeShow(direction) { return direction === "prev" ? this.books.prev && this.isFirstPage : this.books.next && this.isLastPage; }, bookChangeIcon(direction) { let isDown = direction === "next"; if (this.isBTT) { isDown = !isDown; } return isDown ? 
mdiBookArrowDown : mdiBookArrowUp; }, /* * UTIL */ isCoverPage(book, page) { return ( (book.readLtr !== false && page === 0) || (book.readLtr && page === book.maxPage) ); }, _getRouteParams(book, activePage, direction) { const deltaModifier = direction === "prev" ? -1 : 1; let delta = 1; const bookSettings = this.getBookSettings(book); if ( bookSettings.twoPages && !this.isCoverPage(book, +activePage + deltaModifier) ) { delta = 2; } delta = delta * deltaModifier; const page = +activePage + delta; let routeParams = false; if (page >= 0 && page <= book.maxPage) { // make current book route routeParams = { pk: book.pk, page, }; } return routeParams; }, fitToClass(bookSettings) { const classes = {}; let fitTo; fitTo = this.clientSettings.scale > SCALE_DEFAULT ? "Orig" : FIT_TO_CLASSES[bookSettings.fitTo]; if (fitTo) { let fitToClass = "fitTo"; fitToClass += capitalCase(fitTo); if (bookSettings.isVertical) { fitToClass += "Vertical"; } else if (bookSettings.twoPages) { fitToClass += "Two"; } classes[fitToClass] = true; } return classes; }, /* * MUTATIONS */ _applyGlobalSettings(updates) { // Use the function form of $patch; patching with a plain object breaks reactivity here. this.$patch((state) => { state.globalSettings = { ...state.globalSettings, ...updates, }; state.bookSettings = {}; state.empty = false; }); }, toggleToolbars() { this.showToolbars = !this.showToolbars; }, setShowToolbars() { this.showToolbars = true; }, reset() { // HACK because $reset doesn't seem to work. this.$patch((state) => { state.arcs = {}; state.arc = DEFAULT_ARC; state.mtime = 0; state.settingsLoaded = false; state.books = structuredClone(BOOKS_NULL); state.routes = structuredClone(ROUTES_NULL); state.bookSettings = {}; state.intermediateSettings = {}; state.intermediateInfo = null; }); }, /* * ACTIONS */ _getBookRoutePage(book, isPrev) { let bookPage = 0; if ( (isPrev && book.readLtr !== false) || (!isPrev && book.readLtr === false) ) { const bookSettings = this.getBookSettings(book); const bookTwoPagesCorrection = bookSettings.twoPages ? -1 : 0; bookPage = book.maxPage + bookTwoPagesCorrection; } return bookPage; }, _getBookRoute(book, isPrev) { if (!book) { return false; } const page = this._getBookRoutePage(book, isPrev); return { pk: book.pk, page, }; }, _getBookRoutes(prevBook, nextBook) { return { prev: this._getBookRoute(prevBook, true), next: this._getBookRoute(nextBook, false), }; }, async setRoutesAndBookmarkPage(page) { const book = this.books.current; this.$patch((state) => { state.routes.prev = this._getRouteParams(book, page, "prev"); state.routes.next = this._getRouteParams(book, page, "next"); }); await this._setBookmarkPage(page).then(() => { this.bookChange = undefined; return true; }); }, setActivePage(page, reactWithScroll = true) { if (page < 0) { console.warn("Page out of bounds. Redirecting to 0."); return this.routeToPage(0); } else if (page > this.books.current.maxPage) { console.warn( `Page out of bounds.
Redirecting to ${this.books.current.maxPage}.`, ); return this.routeToPage(this.books.current.maxPage); } this.reactWithScroll = Boolean(reactWithScroll); this.page = +page; this.setRoutesAndBookmarkPage(page); if (this.isPagesNotRoutes) { const route = { params: { pk: this.books.current.pk, page } }; const { href } = router.resolve(route); globalThis.history.pushState({}, undefined, href); } else { window.scrollTo(0, 0); } }, async loadGlobalSettings() { READER_API.getSettings(null, ["g"]) .then((response) => { const data = response.data?.scopes?.g; if (data) { this._applyGlobalSettings(data); } }) .catch(console.error); }, async loadBooks({ params, arc, mtime }) { if (!this.settingsLoaded) { this.loadGlobalSettings(); } const route = router.currentRoute.value; if (!params) { params = route.params; } const pk = params.pk; const settings = { arc }; if (!mtime) { mtime = route.query?.ts; if (!mtime) { mtime = this.mtime; } } await READER_API.getReaderInfo(pk, settings, mtime) .then((response) => { const data = response.data; const books = data.books; // Undefined settings breaks code. const allBooks = [books?.prev, books?.current, books?.next]; for (const book of allBooks) { if (book && !book.settings) { book.settings = {}; } } // Generate routes. const routesBooks = this._getBookRoutes(books.prev, books.next); this.$patch((state) => { state.books = books; state.arcs = data.arcs; state.arc = data.arc; state.routes.prev = this._getRouteParams( state.books.current, params.page, "prev", ); state.routes.next = this._getRouteParams( state.books.current, params.page, "next", ); state.routes.books = routesBooks; state.routes.close = data.closeRoute; state.empty = false; state.mtime = data.mtime; state.bookSettings = {}; }); // Load all three settings layers for the current comic. 
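// Descriptive note (added): loadAllSettings fetches the global ("g"), // arc-scoped ("s"/"f"/"a") and per-comic ("c") layers that getBookSettings // masks together in that order.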
if (books.current?.pk) { this.loadAllSettings(+books.current.pk); } return true; }) .catch((error) => { console.debug(error); this.empty = true; }); }, async loadMtimes() { const arcs = []; for (const [group, arcIdInfos] of Object.entries(this.arcs)) { for (const pks of Object.keys(arcIdInfos)) { const arc = { group, pks }; arcs.push(arc); } } if (!arcs.length) { // No arcs is a 500 from the mtime api arcs.push({ group: "r", pks: "0" }); } return await COMMON_API.getMtime(arcs, {}) .then((response) => { const newMtime = response.data.maxMtime; if (newMtime !== this.mtime) { return this.loadBooks({ mtime: newMtime }); } return true; }) .catch(console.error); }, async _setBookmarkPage(page) { const groupParams = { group: "c", ids: [+this.books.current.pk] }; page = Math.max(Math.min(this.books.current.maxPage, page), 0); const updates = { page }; if ( this.activeSettings.finishOnLastPage && page >= this.books.current.maxPage ) { updates["finished"] = true; } await BROWSER_API.updateGroupBookmarks(groupParams, {}, updates); }, async updateComicSettings(updates) { const newBookSettings = { ...this.books.current.settings, ...updates, }; ensureNoTwoPageVertical(newBookSettings); const pk = +this.books.current.pk; const payload = { ...newBookSettings, scope: "c", scopePk: pk, }; await READER_API.updateSettings(payload) .then(() => { this.books.current.settings = newBookSettings; this.bookSettings = {}; }) .catch(console.error); }, setSettingsClient(updates) { this.clientSettings = { ...this.clientSettings, ...updates, }; }, async clearComicSettings() { const pk = +this.books?.current?.pk; if (!pk) return; await READER_API.resetSettings({ scope: "c", scopePk: pk }) .then(() => { this.$patch((state) => { if (state.books.current) { state.books.current.settings = {}; } state.bookSettings = {}; }); }) .catch(console.error); }, _getStoryArcPk() { // When browsing by story arc, pass the first arc id for scoped settings. if (this.arc?.group === "a" && this.arc?.ids?.length) { return this.arc.ids[0]; } return null; }, async loadAllSettings(pk) { if (!pk) { return; } const arcGroup = this.arc?.group || "s"; const storyArcPk = this._getStoryArcPk(); await READER_API.getSettings(pk, ["g", arcGroup, "c"], storyArcPk) .then((response) => { const data = response.data; const scopes = data.scopes || {}; const scopeInfo = data.scopeInfo || {}; // Determine the canonical intermediate scope key. const intermediateKey = ["s", "f", "a"].find((k) => k in scopes); this.$patch((state) => { if (scopes.g) { state.globalSettings = { ...state.globalSettings, ...scopes.g, }; } state.intermediateSettings = (intermediateKey && scopes[intermediateKey]) || {}; state.intermediateInfo = intermediateKey && scopeInfo[intermediateKey] ?
{ scopeType: intermediateKey, scopePk: scopeInfo[intermediateKey].pk, name: scopeInfo[intermediateKey].name, } : null; if (scopes.c && state.books.current) { state.books.current.settings = scopes.c; } state.bookSettings = {}; }); }) .catch(console.error); }, async updateIntermediateSettings(updates) { if (!this.intermediateInfo) { return; } const newSettings = { ...this.intermediateSettings, ...updates, }; ensureNoTwoPageVertical(newSettings); const payload = { ...newSettings, scope: this.intermediateInfo.scopeType, scopePk: this.intermediateInfo.scopePk, }; await READER_API.updateSettings(payload) .then(() => { this.$patch((state) => { state.intermediateSettings = newSettings; state.bookSettings = {}; }); }) .catch(console.error); }, async clearIntermediateSettings() { if (!this.intermediateInfo) return; await READER_API.resetSettings({ scope: this.intermediateInfo.scopeType, scopePk: this.intermediateInfo.scopePk, }) .then(() => { this.$patch((state) => { state.intermediateSettings = {}; state.bookSettings = {}; }); }) .catch(console.error); }, async clearGlobalSettings() { await READER_API.resetSettings({ scope: "g" }) .then((response) => { const data = response.data; this._applyGlobalSettings(data); this.clearComicSettings(); }) .catch(console.error); }, async updateGlobalSettings(updates) { const newGlobalSettings = { ...this.globalSettings, ...updates, }; const payload = { ...newGlobalSettings, scope: "g", }; await READER_API.updateSettings(payload) .then((response) => { const data = response.data; this._applyGlobalSettings(data); this.clearComicSettings(); }) .catch(console.error); }, setBookChangeFlag(direction) { direction = this.normalizeDirection(direction); this.bookChange = this.routes.books[direction] ? direction : undefined; }, linkLabel(direction, suffix) { const prefix = direction === "prev" ? "Previous" : "Next"; return `${prefix} ${suffix}`; }, /* * ROUTE */ normalizeDirection(direction) { return this.activeSettings.isReadInReverse ? DIRECTION_REVERSE_MAP[direction] : direction; }, _validateRoute(params, book) { if (!book) { book = this.books.current; } if (!book) { return {}; } const maxPage = book.maxPage ?? 0; if (params.page > maxPage) { params.page = maxPage; console.warn("Tried to navigate past the end of the book."); } else if (params.page < 0) { params.page = 0; console.warn("Tried to navigate before the beginning of the book."); } return params; }, _routeTo(params, book) { params = this._validateRoute(params, book); if (this.isPagesNotRoutes && +params.pk === this.books.current.pk) { this.setActivePage(+params.page, true); } else { const route = { name: "reader", params }; router.push(route).catch(console.debug); } }, routeToDirectionOne(direction) { // Special two page adjuster direction = this.normalizeDirection(direction); const delta = direction === "prev" ? -1 : 1; // Compute without mutating this.page so an out-of-bounds result leaves state intact. const page = this.page + delta; if (page < 0 || page > this.books.current.maxPage) { return; } const params = { pk: this.books.current.pk, page: page, }; this._routeTo(params); }, routeToDirection(direction) { direction = this.normalizeDirection(direction); if (this.routes[direction]) { const params = this.routes[direction]; this._routeTo(params); } else if (this.routes.books[direction]) { if (this.bookChange === direction) { this._routeTo(this.routes.books[direction], this.books[direction]); } else { // Block book change routes unless the book change flag is set.
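// Descriptive note (added): the first attempt only arms the flag and shows // the book-change indicator; repeating the same direction while armed // performs the actual route.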
this.setBookChangeFlag(direction); } } else { console.debug("No route to direction", direction); } }, routeToPage(page) { const params = { pk: this.books.current.pk, page }; this._routeTo(params); }, routeToBook(direction) { this._routeTo(this.routes.books[direction], this.books[direction]); }, toRoute(params) { return params ? { params } : {}; }, // PREFETCH _prefetchSrc(params, direction, bookChange = false, secondPage = false) { if (!params) { return false; } const book = bookChange ? this.books[direction] : this.books.current; if (!book) { return false; } let page = params.page; if (secondPage) { const settings = this.getBookSettings(book); if (!settings.twoPages) { return false; } page += 1; } if (page > book.maxPage) { return false; } const paramsPlus = { pk: params.pk, page, mtime: book.mtime }; return getComicPageSource(paramsPlus); }, prefetchLinks(params, direction, bookChange = false) { if (!bookChange && this.cacheBook) { return {}; } const sources = [ this._prefetchSrc(params, direction, bookChange, false), this._prefetchSrc(params, direction, bookChange, true), ]; const link = []; for (const href of sources) { if (href) { link.push({ ...PREFETCH_LINK, href }); } } return { link }; }, prefetchBook(book) { if (!this.cacheBook || book.fileType == "PDF") { return {}; } const pk = book.pk; const link = []; for (let page = 0; page <= book.maxPage; page++) { const params = { pk, page, mtime: book.mtime }; const href = getComicPageSource(params); if (href) { link.push({ ...PREFETCH_LINK, href }); } } return { link }; }, }, }); ================================================ FILE: frontend/src/stores/socket.js ================================================ import { useWebSocket } from "@vueuse/core"; import { defineStore } from "pinia"; import { WS_URL } from "@/api/v3/notify"; import { messages } from "@/choices/websocket-messages.json"; import router from "@/plugins/router"; import { useAuthStore } from "@/stores/auth"; import { useBrowserStore } from "@/stores/browser"; import { useCommonStore } from "@/stores/common"; import { useReaderStore } from "@/stores/reader"; import { store } from "@/stores/store"; const USER_GROUP_ROUTES = Object.freeze([ "admin-groups", "admin-users", "admin-libraries", ]); const HEARTBEAT_INTERVAL_MS = 5_000; const RECONNECT_RETRIES = Infinity; const RECONNECT_DELAY_MS = 3_000; // TODO move to some generic util. function currentRouteName() { return router?.currentRoute?.value?.name; } // Manual heartbeat — fire-and-forget empty string, no pong expected. // VueUse's built-in heartbeat option closes the connection if no pong is // received within its pongTimeout (default 1000ms), which breaks servers // that silently consume the ping without echoing anything back. 
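// For contrast, the built-in variant this replaces would look roughly like // (a sketch; see @vueuse/core's useWebSocket docs for the exact option shape): // useWebSocket(WS_URL, { heartbeat: { message: "", interval: HEARTBEAT_INTERVAL_MS, pongTimeout: 1000 } }); // where any missed pong within pongTimeout closes the socket, hence the manual timer below.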
let heartbeatTimer = 0; function startHeartbeat(ws) { stopHeartbeat(); heartbeatTimer = globalThis.setInterval(() => { if (ws.readyState === WebSocket.OPEN) { ws.send(""); } }, HEARTBEAT_INTERVAL_MS); } function stopHeartbeat() { globalThis.clearInterval(heartbeatTimer); heartbeatTimer = 0; } export const useSocketStore = defineStore("socket", () => { const { status, open } = useWebSocket(WS_URL, { immediate: true, autoReconnect: { retries: RECONNECT_RETRIES, delay: RECONNECT_DELAY_MS, onFailed() { console.error("[socket] Failed to reconnect after all retries"); }, }, onConnected(ws) { startHeartbeat(ws); console.debug("[socket] Connected."); }, onMessage(_ws, event) { dispatchMessage(event.data); }, onError(ws, event) { console.error("[socket] Error on", ws.url, event); }, onDisconnected(_ws, event) { stopHeartbeat(); console.debug( "[socket] Disconnected with code:", event.code, "reason:", event.reason || "(none)", ); }, }); // Lazy admin store loader async function getAdminStore() { if (!useAuthStore().isUserAdmin) return undefined; return import("@/stores/admin") .then((m) => m.useAdminStore()) .catch(console.error); } // Notification handlers async function adminLoadTables(tables) { const adminStore = await getAdminStore(); adminStore?.loadTables(tables); } async function adminLoadAllStatuses() { const adminStore = await getAdminStore(); adminStore?.loadAllStatuses(); } function reloadBrowser() { if (currentRouteName() === "browser") { useBrowserStore().loadMtimes(); } } function adminFlagsNotified() { useAuthStore().loadAdminFlags(); if (currentRouteName() === "admin-flags") { adminLoadTables(["Flag"]); } } function groupsNotified() { if (USER_GROUP_ROUTES.includes(currentRouteName())) { adminLoadTables(["Group"]); } } function usersNotified() { if (USER_GROUP_ROUTES.includes(currentRouteName())) { adminLoadTables(["User"]); } } async function libraryNotified() { useCommonStore().setTimestamp(); switch (currentRouteName()) { case "browser": useBrowserStore().loadMtimes(); break; case "reader": useReaderStore().loadMtimes(); break; case "admin-libraries": adminLoadTables(["Library", "FailedImport"]); break; case "admin-stats": { const adminStore = await getAdminStore(); adminStore?.loadStats(); break; } } } async function failedImportsNotified() { const adminStore = await getAdminStore(); if (adminStore) adminStore.unseenFailedImports = true; } // Message Dispatcher function dispatchMessage(message) { if (!message) return; console.debug("[socket] message:", message); switch (message) { case messages.ADMIN_FLAGS: adminFlagsNotified(); break; case messages.BOOKMARK: reloadBrowser(); break; case messages.COVERS: useCommonStore().setTimestamp(); reloadBrowser(); break; case messages.GROUPS: groupsNotified(); libraryNotified(); break; case messages.USERS: usersNotified(); libraryNotified(); break; case messages.LIBRARY: libraryNotified(); break; case messages.LIBRARIAN_STATUS: adminLoadTables(["ActiveLibrarianStatus"]); adminLoadAllStatuses(); break; case messages.FAILED_IMPORTS: failedImportsNotified(); break; default: console.debug("Unhandled WebSocket message:", message); } } // Public open for reconnect when user changes. const reopen = () => { // Don't force a reopen if we're in the middle of connecting.
if (status.value == "CONNECTING") return; open(true); }; return { reopen }; }); export function useSocketStoreWithOut() { return useSocketStore(store); } ================================================ FILE: frontend/src/stores/store.js ================================================ import { createPinia } from "pinia"; const store = createPinia(); export function setupStore(app) { app.use(store); } export { store }; ================================================ FILE: frontend/src/util.js ================================================ // Utility functions export const range = (start, end = 0) => { start = Number.isInteger(start) && start >= 0 ? start : 0; end = Number.isInteger(end) && end >= 0 ? end : 0; const length = end > start ? Math.max(end - start, 0) : start; let result = [...Array.from({ length }).keys()]; if (end > 0) { result = result.map((i) => i + start); } return result; }; ================================================ FILE: frontend/tests/unit/reader-nav-button.test.js ================================================ /* Simple test just to play with vitest. */ import { createTestingPinia } from "@pinia/testing"; import { mount } from "@vue/test-utils"; import { expect, test } from "vitest"; import { createRouter, createWebHistory } from "vue-router"; import ReaderNavButton from "@/components/reader/toolbars/nav/reader-nav-button.vue"; import vuetify from "@/plugins/vuetify"; import { useReaderStore } from "@/stores/reader"; const BTN_DISABLED = "v-btn--disabled"; const setupRouter = function () { return createRouter({ history: createWebHistory(), routes: [ { path: "/c/:pk/:page", name: "reader", component: ReaderNavButton }, ], }); }; test("reader-nav-button", async () => { console.info("started test"); expect(ReaderNavButton).toBeTruthy(); const router = setupRouter(); router.push("/c/2/0"); await router.isReady(); const store = createTestingPinia({ initialState: { reader: { books: { current: { pk: 0 } }, page: 0 } }, }); const wrapper = mount(ReaderNavButton, { props: { value: 0, twoPages: false, }, global: { plugins: [router, vuetify, store], stubs: ["router-link", "router-view"], }, }); const readerStore = useReaderStore(); // test initial state expect(wrapper.html()).toMatchSnapshot(); expect(wrapper.text()).toContain("0"); // push new route const btn = wrapper.findComponent({ name: "v-btn" }); expect(btn.classes(BTN_DISABLED)).toBe(true); await wrapper.vm.$router.push({ params: { pk: 2, page: 10 }, }); readerStore.page = 10; await wrapper.vm.$nextTick(); expect(btn.classes(BTN_DISABLED)).toBe(false); // push back to original state await wrapper.vm.$router.push({ params: { pk: 2, page: 0 } }); readerStore.page = 0; await wrapper.vm.$nextTick(); expect(btn.classes(BTN_DISABLED)).toBe(true); expect(wrapper.text()).toContain("0"); }, 1000); export default {}; ================================================ FILE: frontend/vite.config.js ================================================ import { Unhead } from "@unhead/vue/vite"; import vue from "@vitejs/plugin-vue"; import checker from "vite-plugin-checker"; import fs from "fs"; import { hostname } from "os"; import path from "path"; import toml from "toml"; import { defineConfig } from "vite"; import { dynamicBase } from "vite-plugin-dynamic-base"; import { run } from "vite-plugin-run"; import vuetify from "vite-plugin-vuetify"; import package_json from "./package.json"; let rootPath; try { // for dev & build const CODEX_CONF = toml.parse(fs.readFileSync("../config/codex.toml")); rootPath = 
CODEX_CONF?.server?.url_path_prefix || ""; } catch { rootPath = ""; } const STATIC_DIR_NAME = "static"; const BASE_PATH = `${rootPath}/${STATIC_DIR_NAME}/`; const IS_TEST_ENV = process.env.NODE_ENV === "test"; const defineObj = { CODEX_PACKAGE_VERSION: JSON.stringify(package_json.version), }; if (IS_TEST_ENV) { defineObj.CODEX = { API_V3_PATH: JSON.stringify("dummy"), }; } console.info(defineObj); const config = defineConfig(({ mode }) => { const PROD = mode === "production"; const DEV = mode === "development"; // https://github.com/vitejs/vite/issues/19242 const ALLOWED_HOSTS = DEV ? [hostname().toLowerCase()] : []; let publicPathPrefix = "window.CODEX.APP_PATH"; if (PROD) { publicPathPrefix += ".substring(1)"; } const PUBLIC_PATH = `${publicPathPrefix} + "${STATIC_DIR_NAME}"`; return { base: BASE_PATH, build: { emptyOutDir: true, manifest: "manifest.json", minify: PROD, outDir: path.resolve("../codex/static_build"), rollupOptions: { // No need for index.html input: path.resolve("./src/main.js"), }, sourcemap: DEV, }, css: { devSourcemap: DEV, preprocessorOptions: { scss: { api: "modern", }, }, }, define: defineObj, plugins: [ vue(), vuetify({ autoImport: true }), checker({ eslint: { lintCommand: "eslint_d --cache .", // "./src/**/*.{js,vue}"', useFlatConfig: true, }, }), dynamicBase({ publicPath: PUBLIC_PATH, }), run([ { name: "Choices to JSON", run: ["../bin/build-choices.sh"], pattern: [ "../codex/choices.py", "../codex/choices_to_json.py", "../bin/build-choices.sh", ], }, ]), Unhead(), // ValidatePlugin(), ], publicDir: false, resolve: { alias: { "@": path.resolve(import.meta.dirname, "src"), }, }, server: { host: true, allowedHosts: ALLOWED_HOSTS, strictPort: true, }, test: { environment: "happy-dom", // deps: { inline: ["vuetify"] }, globals: true, server: { deps: { inline: ["vuetify"] } }, }, }; }); export default config; ================================================ FILE: mkdocs.yml ================================================ extra_css: - style.css markdown_extensions: - smarty - toc: permalink: true nav: - NEWS.md - DOCKER.md plugins: - minify: minify_html: true minify_js: true minify_css: true htmlmin_opts: remove_comments: true cache_safe: true css_files: - style.css - search site_author: AJ site_description: Codex Comicbook Reader Admin Guide site_name: Codex theme: color_mode: dark logo: codex/img/logo.svg name: mkdocs strict: true ================================================ FILE: mock_comics/__init__.py ================================================ """Create mock comics to test with.""" ================================================ FILE: mock_comics/bigbook.py ================================================ #!/usr/bin/env python3 """Create a big book for debugging page turning issues.""" import sys from pathlib import Path from PIL import Image, ImageDraw, ImageFont IMAGE_SIZE = (200, 100) # Width, Height BG_COLOR = (255, 255, 255) # White background TEXT_COLOR = (0, 0, 0) # Black text FONT_SIZE = 60 FONT = ImageFont.truetype("SFNS.ttf", FONT_SIZE) def create_number_images(output_dir: str, num_files: int): """Create number images.""" output_dir_path = Path(output_dir) output_dir_path.mkdir(exist_ok=True) for i in range(num_files): img = Image.new("RGB", IMAGE_SIZE, color=BG_COLOR) d = ImageDraw.Draw(img) text = str(i) # Calculate text size using textbbox for more accurate centering bbox = d.textbbox((0, 0), text, font=FONT) text_width = bbox[2] - bbox[0] text_height = bbox[3] - bbox[1] # Calculate position to center the text x = (IMAGE_SIZE[0] - 
text_width) / 2 y = (IMAGE_SIZE[1] - text_height) / 2 d.text((x, y), text, fill=TEXT_COLOR, font=FONT) # Generate a unique filename (the naming convention can be customized) filename = f"number_{i:04d}.jpg" path = output_dir_path / filename # Save the image as a JPEG file img.save(path, "JPEG") print(f"Created: {path}") def main(): """Use cli args.""" create_number_images(sys.argv[1], int(sys.argv[2])) if __name__ == "__main__": main() ================================================ FILE: mock_comics/mock_comics.py ================================================ #!/usr/bin/env python3 """Create large numbers of mock comics.""" import random import string import sys import time import zipfile from io import BytesIO from pathlib import Path from xml.etree.ElementTree import Element, SubElement, tostringlist from zlib import adler32 from comicbox.enums.comicbox import IdSources from comicbox.identifiers.identifiers import IDENTIFIER_PARTS_MAP from comicbox.schemas.comicinfo import ComicInfoSchema from PIL import Image from pycountry import languages GROUPS = ("publisher", "imprint", "series") M2MS = ("characters", "genres", "locations", "tags", "teams") HEX_FILL = 8 PATH_STEP = 2 CHANCE_OF_NULL = 0.1 CHANCE_OF_BAD_TYPE = 0.2 CHOICES_STR = string.ascii_uppercase + string.digits CREDIT_TAGS = ( "Colorist", "CoverArtist", "Editor", "Inker", "Letterer", "Penciller", "Writer", ) TEXT_FIELDS = { "TEXT": ("Summary", "Notes", "ScanInformation"), "NAME_LISTS": ( "Genre", "Characters", "Tags", "Teams", "Locations", "StoryArc", "SeriesGroup", ), "ROLES": ("Colorist", "CoverArtist", "Editor", "Letterer", "Inker"), } RANGED_FIELDS = { "INTS": { "Number": 1024, "AlternateNumber": 1024, "Count": 1024, "AlternateCount": 1024, "Year": 2030, "Month": 12, "Day": 31, "Volume": 2030, "PageCount": 1024, "StoryArcNumber": 1024, }, "VARCHARS": { "AlternateSeries": 64, "LanguageISO": 2, "Format": 16, "Publisher": 24, "Imprint": 24, "Series": 64, "AgeRating": 18, "Title": 24, "Summary": 1024, "Notes": 128, "ScanInformation": 32, "MainCharacterOrTeam": 24, "Web": 64, }, "DECIMALS": {"CommunityRating": 100.0}, } BOOL_VALUES = ("yes", "no") MANGA_VALUES = (*BOOL_VALUES, "yesandrighttoleft", "yesrtl") NUM_M2M_NAMES = 20 NUM_CREDITS = 15 STATUS_DELAY = 5 LANG_KEYS = ("alpha_2", "alpha_3", "alpha_4", "name") def _get_all_language_codes(): langs = [] for lang in languages: for key in LANG_KEYS: if code := getattr(lang, key, None): break else: continue langs.append(code) return tuple(langs) LANG_LIST = _get_all_language_codes() COVER_RATIO = 1.5372233400402415 COVER_WIDTH = 250 COVER_HEIGHT = int(COVER_RATIO * COVER_WIDTH) FIVE_BY_FIVE_NIDS = frozenset( {enum.value for enum in (IdSources.METRON, IdSources.GCD, IdSources.LCG)} ) def is_valid(): """Determine whether to make the tag null or the wrong type.""" n = random.random() if n < CHANCE_OF_NULL: return None return n >= CHANCE_OF_BAD_TYPE def rand_string(length, choices=CHOICES_STR): """Return a random string of arbitrary length.""" return "".join(random.choices(choices, k=length)) def rand_digits(length): """Return a random string of digits of arbitrary length.""" return rand_string(length, string.digits) def create_int(md, key, limit): """Add an int to the metadata.""" v = is_valid() if v is None: return if not v: value = rand_string(5) else: limit = round(limit * 1.2) value = random.randint(0, limit) md[key] = value def create_float(md, key, limit): """Add a float to the metadata.""" v = is_valid() if v is None: return value = rand_string(5) if not v else random.random() *
limit * 1.1 md[key] = value def create_str(md, key, limit): """Add a random string to the metadata.""" if is_valid() is None: return prefix = key + "_" length = random.randint(0, round(limit * 1.2)) - len(prefix) md[key] = prefix + rand_string(length) def create_web(md, key, _limit): """Create a valid parsable web key.""" if is_valid() is None: return nid = random.choice(tuple(IDENTIFIER_PARTS_MAP.keys())) if nid == IdSources.COMICVINE: id_key = "4000-" + rand_digits(6) elif nid in FIVE_BY_FIVE_NIDS: id_key = rand_string(5) + "/" + rand_string(5) elif nid == IdSources.ASIN: id_key = rand_string(10) elif nid == IdSources.COMIXOLOGY: id_key = "x/x/" + rand_string(10) elif nid == IdSources.ISBN: id_key = rand_digits(10) elif nid == IdSources.UPC: id_key = rand_digits(12) else: return id_parts = IDENTIFIER_PARTS_MAP[nid] url = id_parts.unparse_url("issue", id_key) md[key] = url def create_lang(md, key, _limit): """Add an iso language code.""" lang_code = random.choice(LANG_LIST) md[key] = lang_code def create_name_list(md, key): """Add an m2m field to the metadata.""" if is_valid() is None: return m2m = [] prefix = key + "_" for _ in range(random.randint(0, NUM_M2M_NAMES)): name = prefix + rand_string(64 - len(prefix)) m2m.append(name) md[key] = ",".join(m2m) def create_bool(md, key): """Create a boolean tag.""" v = is_valid() if v is None: return value = rand_string(5) if not v else BOOL_VALUES[random.randint(0, 1)] md[key] = value def create_manga(md): """Create a manga tag.""" v = is_valid() if v is None: return value = rand_string(5) if not v else MANGA_VALUES[random.randint(0, 3)] md["Manga"] = value def create_credits(md): """Add credits to the metadata.""" v = is_valid() if v is None: return for _ in range(random.randint(0, NUM_CREDITS)): role = random.choices(CREDIT_TAGS, k=1)[0] person = rand_string(round(64 * 1.1)) md[role] = person def create_metadata(): """Create randomized metadata.""" md = {} for key, limit in RANGED_FIELDS["INTS"].items(): create_int(md, key, limit) for key in TEXT_FIELDS["TEXT"]: create_str(md, key, 100) for key, limit in RANGED_FIELDS["VARCHARS"].items(): if key == "LanguageISO": create_lang(md, key, limit) elif key == "Web": create_web(md, key, limit) else: create_str(md, key, limit) for key, limit in RANGED_FIELDS["DECIMALS"].items(): create_float(md, key, limit) for key in TEXT_FIELDS["NAME_LISTS"]: create_name_list(md, key) create_bool(md, "BlackAndWhite") create_manga(md) create_credits(md) root = Element(ComicInfoSchema.ROOT_TAG) root.attrib["xmlns:xsi"] = "http://www.w3.org/2001/XMLSchema-instance" root.attrib["xmlns:xsd"] = "http://www.w3.org/2001/XMLSchema" for key, val in md.items(): SubElement(root, key).text = str(val) return b"\n".join(tostringlist(root, encoding="utf-8")) def create_cover_page(): """Create a small randomly colored cover image.""" r = random.randint(0, 255) g = random.randint(0, 255) b = random.randint(0, 255) img = Image.new("RGB", (COVER_WIDTH, COVER_HEIGHT), (r, g, b)) buf = BytesIO() img.save(buf, format="PNG") return buf.getvalue() def create_test_file(path): """Create a test file and write metadata to it.""" # Create a minimal file to write to path.parent.mkdir(parents=True, exist_ok=True) md_data = create_metadata() image_data = create_cover_page() with zipfile.ZipFile(path, mode="w") as zf: zf.writestr("ComicInfo.xml", md_data) zf.writestr("cover.jpg", image_data) def _hex_path(num): """Translate an integer into an efficient filesystem path.""" num_str = f"{num:07}" fnv = adler32(bytes(num_str, "utf-8")) hex_str = format(fnv,
f"0{HEX_FILL}x") parts = [hex_str[i : i + PATH_STEP] for i in range(0, len(hex_str), PATH_STEP)] path = Path("/".join(parts)) return path.with_suffix(".cbz") def create_file(root, index): """Create a test file.""" path = root / _hex_path(index) path.parent.mkdir(exist_ok=True, parents=True) create_test_file(path) def main(args): """Process args and create mock comics.""" try: root = Path(args[1]) num_comics = int(args[2]) except Exception: print(f"{args[0]} ") sys.exit(1) since = time.time() index = 0 for index in range(num_comics): create_file(root, index) now = time.time() if now - since > STATUS_DELAY: print(f"{index + 1}/{num_comics}") since = now print(f"{index + 1}/{num_comics}") if __name__ == "__main__": main(sys.argv) ================================================ FILE: mock_comics/mock_comics.sh ================================================ #!/bin/bash # create a mock comics # mock_comics.sh set -euo pipefail uv run ./mock_comics.py "$@" ================================================ FILE: nginx/default.conf ================================================ ## Version 2025/05/31 - Changelog: https://github.com/linuxserver/docker-baseimage-alpine-nginx/commits/3.22/root/defaults/nginx/site-confs/default.conf.sample server { listen 80 default_server; listen [::]:80 default_server; listen 443 ssl default_server; listen [::]:443 ssl default_server; listen 443 quic reuseport default_server; listen [::]:443 quic reuseport default_server; server_name _; include /config/nginx/ssl.conf; access_log /dev/stdout; error_log /dev/stderr; proxy_set_header Host $http_host; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Host $server_name; proxy_set_header X-Forwarded-Port $server_port; proxy_set_header X-Forwarded-Proto $scheme; # proxy_set_header X-Forwarded-Ssl on; proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Scheme $scheme; # WS proxy_http_version 1.1; proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; gzip_comp_level 6; gzip_proxied any; gzip_types text/plain text/css text/js text/xml text/javascript application/javascript application/json application/xml image/svg+xml; location / { proxy_pass http://codex-dev-server.local:9810; proxy_set_header Remote-User 'admin'; } } ================================================ FILE: package.json ================================================ { "description": "linting for top level codex project & django backend", "type": "module", "scripts": { "fix": "eslint_d --cache --fix . && prettier --write . && bin/prettier-nginx.sh --write", "lint": "eslint_d --cache . && prettier --check . && bin/prettier-nginx.sh --check && remark --quiet ." 
}, "browserslist": [ "> 1%", "Firefox ESR", "last 2 versions", "not dead", "not op_mini all" ], "prettier": { "plugins": [ "@prettier/plugin-xml", "prettier-plugin-nginx", "prettier-plugin-packagejson", "prettier-plugin-sh", "prettier-plugin-toml" ], "overrides": [ { "files": [ "**/*.md" ], "options": { "proseWrap": "always", "tabWidth": 4 } }, { "files": [ "**/*.xsd" ], "options": { "printWidth": 120 } }, { "files": [ "**/*Dockerfile" ], "options": { "parser": "sh" } }, { "files": [ "**/nginx/http.d/**/*.conf" ], "options": { "parser": "nginx" } } ] }, "remarkConfig": { "plugins": [ "gfm", "lint", "preset-lint-consistent", "preset-lint-markdown-style-guide", "preset-lint-recommended", "preset-prettier" ] }, "devDependencies": { "@eslint-community/eslint-plugin-eslint-comments": "^4.7.1", "@eslint/js": "^10.0.1", "@eslint/json": "^1.2.0", "@fsouza/prettierd": "^0.27.0", "@prettier/plugin-xml": "^3.4.2", "@stylistic/eslint-plugin": "^5.10.0", "@vitest/eslint-plugin": "^1.6.16", "eslint": "^10.2.1", "eslint_d": "^15.0.2", "eslint-config-prettier": "^10.1.8", "eslint-plugin-array-func": "^5.1.1", "eslint-plugin-compat": "^7.0.1", "eslint-plugin-de-morgan": "^2.1.1", "eslint-plugin-depend": "^1.5.0", "eslint-plugin-html": "^8.1.4", "eslint-plugin-import-x": "^4.16.2", "eslint-plugin-math": "^0.13.1", "eslint-plugin-mdx": "^3.7.0", "eslint-plugin-no-secrets": "^2.3.3", "eslint-plugin-no-unsanitized": "^4.1.5", "eslint-plugin-no-use-extend-native": "^0.7.2", "eslint-plugin-perfectionist": "^5.9.0", "eslint-plugin-prettier": "^5.5.5", "eslint-plugin-promise": "^7.2.1", "eslint-plugin-regexp": "^3.1.0", "eslint-plugin-security": "^4.0.0", "eslint-plugin-sonarjs": "^4.0.3", "eslint-plugin-toml": "^1.3.1", "eslint-plugin-unicorn": "^64.0.0", "eslint-plugin-vue": "^10.9.0", "eslint-plugin-vue-scoped-css": "^3.0.0", "eslint-plugin-yml": "^3.3.1", "postcss-scss": "^4.0.9", "prettier": "^3.8.3", "prettier-plugin-nginx": "^1.0.3", "prettier-plugin-packagejson": "^3.0.2", "prettier-plugin-sh": "^0.18.1", "prettier-plugin-toml": "^2.0.6", "remark-cli": "^12.0.1", "remark-gfm": "^4.0.1", "remark-preset-lint-consistent": "^6.0.1", "remark-preset-lint-markdown-style-guide": "^6.0.1", "remark-preset-lint-recommended": "^7.0.1", "remark-preset-prettier": "^2.0.2", "svgo": "^4.0.1" } } ================================================ FILE: pyproject.toml ================================================ [project] classifiers = [ "Development Status :: 5 - Production/Stable", "Environment :: Web Environment", "Framework :: Django :: 5.1", "Intended Audience :: End Users/Desktop", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: JavaScript", "Programming Language :: Python :: 3 :: Only", "Topic :: Internet :: WWW/HTTP", "Topic :: Multimedia :: Graphics :: Viewers", ] dependencies = [ "bidict~=0.23", "channels~=4.2", "comicbox[pdf]~=2.2.1", "dateparser~=1.2", "django-cachalot~=2.8", "django-cors-headers~=4.0", "django-rest-registration~=0.9", "django-vite~=3.1", "djangorestframework-camel-case~=1.4", "djangorestframework~=3.16", "django~=6.0", "drf-spectacular~=0.28", "granian~=2.7", "humanize~=4.12", "lark~=1.2", "loguru~=0.7.3", "nh3~=0.2", "Pillow~=12.0", "psutil~=7.1", "servestatic[brotli]~=4.1", "setproctitle~=1.3.7", "tomli~=2.0; python_version < '3.11'", "typing-extensions~=4.14", "tzlocal~=5.0", "watchfiles>=1.1.1", "zipstream-ng~=1.8", ] description = "A comic archive web server." 
keywords = ["cb7", "cbr", "cbt", "cbz", "comic", "pdf"] readme = "README.md" requires-python = ">=3.12" license = "GPL-3.0-only" name = "codex" version = "1.10.12" [[project.authors]] name = "AJ Slater" email = "aj@slater.net" [project.urls] News = "https://codex-comic-reader.readthedocs.io/NEWS/" Documentation = "https://codex-comic-reader.readthedocs.io" Demo = "https://demo.codex-reader.app/" "Docker Image" = "https://github.com/ajslater/codex/pkgs/container/codex" Issues = "https://github.com/ajslater/codex/issues" Source = "https://github.com/ajslater/codex" [project.scripts] codex = "codex.run:main" [dependency-groups] dev = [ "granian[reload]", "infer-types~=1.0.0", "neovim~=0.3", "nplusone~=1.0", "picopt~=6.2.0", "semver~=3.0.4", "toml-cli~=0.7", "tomlkit~=0.14", ] build = ["cairosvg~=2.8"] lint = [ "basedpyright~=1.38", "codespell~=2.4", "complexipy~=5.1", "django-schema-graph~=3.1", "django-types~=0.22", "djangorestframework-types~=0.9", "djlint~=1.36", "icecream~=2.1", "mbake~=1.4.5", "pathspec~=1.1.0", "radon~=6.0", "ruff~=0.15", "types-python-dateutil~=2.9", "ty~=0.0.18", "vulture~=2.3", ] test = [ "coverage~=7.0", "pytest-asyncio~=1.0", "pytest-cov~=7.0", "pytest-django~=4.1", "pytest-gitignore~=1.3", ] docs = ["mkdocs-minify-plugin~=0.8", "mkdocs~=1.6"] ci = [] [build-system] requires = ["uv_build~=0.11.2"] build-backend = "uv_build" [tool.uv.build-backend] module-root = "" source-include = [ ".*ignore", ".circlci/**", ".env.platforms", ".github/**", ".picopt_treestamps.yaml", ".readthedocs.yaml", ".remarkignore", ".shellcheckrc", "Makefile", "NEWS.md", "bin/**", "bun.lock", "ci/**", "compose.yaml", "docs/DOCKER.md", "docs/NEWS", "docs/NEWS.md", "docs/README.md", "docs/WINDOWS.md", "docs/codex/img/logo.svg", "docs/requirements.txt", "docs/strange.jpg", "docs/style.css", "docs/style.material.css", "docs/style.mkdocs.css", "docs/style.readthedocs.css", "eslint.config.js", "frontend/**", "img/**", "mkdocks.yml", "mock_comics/**", "package.json", "strange.jpg", "tests/**", "uv.lock", "vulture_ignorelist.py", ] source-exclude = [ "**/*\\~", "**/.DS_Store", "codex/static_build", "frontend/node_modules", ] wheel-exclude = [ "**/*\\~", "**/.DS_Store", "codex/**/README.md", "codex/img", "codex/static_build", "codex/static_src", ] [tool.basedpyright] exclude = [ "**/.*", "**/__pycache__", "**/node_modules", "codex/_vendor", "codex/static", "codex/static_build", "comics", "config", "dist", "frontend", "site", "site", "test-results", "typings", "vulture_ignorelist.py", ] pythonVersion = "3.12" failOnWarnings = false reportAny = false reportExplicitAny = false reportImportCycles = true reportIncompatibleUnannotatedOverride = true reportMissingParameterType = false reportMissingTypeArgument = false reportMissingTypeStubs = false reportPrivateUsage = false reportUnannotatedClassAttribute = false reportUnknownArgumentType = false reportUnknownLambdaType = "hint" reportUnknownMemberType = false reportUnknownParameterType = false reportUnknownVariableType = false reportUnusedCallResult = false reportUnusedParameter = false [tool.codespell] builtin = "clear,code,rare" check-hidden = true ignore-words-list = "coverd,falsy,ro,searchd,thead,versio,ws" skip = "*~,.*,./bun.lock,./codex/_vendor,./codex/static,./codex/static_build,./comics,./config,./dist,./frontend/coverage,./node_modules,./package.json,./site,./test-results,/uv.lock" [tool.complexipy] failed = true paths = ["codex", "tests"] exclude = [ "*/.*", "frontend", "node_modules", "site", "test-results", "typings", "vulture_ignorelist.py", 
] [tool.coverage.html] directory = "test-results/coverage" [tool.coverage.run] source = ["codex"] branch = true concurrency = ["multiprocessing"] omit = [ "*/.*", "*__pycache__*", "codex/_vendor/*", "codex/static/*", "codex/static_build/*", "comics/*", "dist/*", "frontend/*", "node_modules/*", "site/*", "test-results/*", "typings/*", "vulture_ignorelist.py", ] [tool.pytest] minversion = "9.0" addopts = [ "--cov", "--cov-append", "--cov-report=html", "--cov-report=term", "--junit-xml=test-results/pytest/results.xml", "--strict-config", "--strict-markers", "-ra", ] junit_family = "xunit2" strict = true testpaths = ["tests"] pythonpath = ["."] DJANGO_SETTINGS_MODULE = "codex.settings" [tool.radon] exclude = "*~,.*,.*/*,__pycache__/*,codex/_vendor/*,codex/static/*,codex/static_build/*,config/*,dist/*,frontend/*,node_modules/*,site/*,test-results/*,typings/*" [tool.ruff] builtins = ["ic"] extend-exclude = [ "**/.*", "**/__pycache__", "codex/_vendor", "codex/static", "codex/static_build", "dist", "frontend", "site", "test-results", "typings", "vulture_ignorelist.py", ] show-fixes = true target-version = "py312" [tool.ruff.lint] extend-ignore = [ "BLE001", "COM812", "COM819", "D203", "D206", "D212", "E111", "E114", "E117", "E501", "ISC001", "PERF203", "S101", "W191", ] extend-select = [ "A", "ARG", "ASYNC", "B", "BLE", "C4", "C90", "COM", "D", "DJ", "DTZ", "E", "EM", "ERA", "EXE", "F", "FA", "FBT", "FIX", "FLY", "FURB", "I", "ICN", "INP", "INT", "ISC", "LOG", "N", "PERF", "PGH", "PIE", "PLE", "PLR", "PLW", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "S", "SIM", "SLF", "SLOT", "T10", "T20", "TC", "TID", "TRY", "UP", "W", "YTT", ] external = ["V101"] task-tags = ["FIXME", "HACK", "TODO", "XXX", "http"] [tool.ruff.lint.flake8-self] ignore-names = ["_meta"] [tool.ruff.lint.flake8-unused-arguments] ignore-variadic-names = true [tool.ruff.lint.pycodestyle] ignore-overlong-task-comments = true [tool.ruff.lint.per-file-ignores] "tests/*" = ["D101", "D102", "T203"] "codex/migrations/*" = ["RUF012", "T201"] "codex/models/__init__.py" = ["F403"] "ci/cleanup-repo.py" = ["T201"] "mock_comics/*" = ["S311", "T201"] [tool.ruff.lint.pylint] max-args = 8 [tool.ty.environment] python = "./.venv" python-version = "3.12" [tool.ty.src] exclude = [ "**/.*", "**/__pycache__", "**/node_modules", "codex/_vendor", "codex/static", "codex/static_build", "comics", "dist", "frontend", "site", "test-results", "typings", "vulture_ignorelist.py", ] [tool.typos.default.extend-words] bookmarkd = "bookmarkd" coverd = "coverd" versio = "versio" [tool.vulture] exclude = [ "*/.*", "*/__pycache__*", "*/node_modules*", "codex/_vendor", "codex/static/", "codex/static_build/", "comics/", "dist/", "frontend/", "site", "test_results/", "typings/", ] min_confidence = 61 sort_by_size = true [tool.djlint] extend_exclude = "*/.*,__pycache__,codex/_vendor,codex/static_build,codex/static,./comics,dist,site,test-results,typings" ignore = "H030,H031" profile = "django" use_gitignore = true ================================================ FILE: tests/README.md ================================================ # Codex Tests Codex is light on tests right now Frontend tests live in /frontend/tests/ ================================================ FILE: tests/__init__.py ================================================ """Tests for codex.""" ================================================ FILE: tests/files/comicbox.example.yaml ================================================ appID: comicbox dev comicbox: age_rating: Everyone alternate_images: - alt0.jxl - 
alt1.jxl arcs: c: {} d: number: 1 identifiers: metron: key: "123" e: number: 3 f: number: 5 bookmark: 5 characters: Captain Science: {} Boy Empirical: identifiers: metron: key: 345 collection_title: The Big Omnibus country: US credits: Joe Orlando: roles: Writer: {} Wally Wood: roles: Penciller: {} credit_primaries: Joe Orlando: Writer Wally Wood: Penciller critical_rating: 10 date: cover_date: 1950-11-01 store_date: 1950-11-28 day: 1 month: 11 year: 1950 ext: cbz original_format: Trade Paperback genres: Science Fiction: {} identifiers: comicvine: key: "145269" url: https://comicvine.gamespot.com/c/4000-145269/ identifier_primary_source: source: metron url: "https://metron.cloud/" imprint: name: TestImprint issue: name: 1.2S number: 1.2 suffix: S language: en locations: The Moon: {} manga: No monochrome: false notes: Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] urn:comicvine:4000-145269 page_count: 36 pages: 0: { page_type: FrontCover, size: 429985 } 1: { size: 332936 } 2: { size: 458657 } 3: { size: 450456 } 4: { size: 436648 } 5: { size: 443725 } 6: { size: 469526 } 7: { size: 429811 } 8: { size: 445513 } 9: { size: 446292 } 10: { size: 458589 } 11: { size: 417623 } 12: { size: 445302 } 13: { size: 413271 } 14: { size: 434201 } 15: { size: 439049 } 16: { size: 485957 } 17: { size: 388379 } 18: { size: 368138 } 19: { size: 427874 } 20: { size: 422522 } 21: { size: 442529 } 22: { size: 423785 } 23: { size: 427980 } 24: { size: 445631 } 25: { size: 413615 } 26: { size: 417605 } 27: { size: 439120 } 28: { size: 451598 } 29: { size: 451550 } 30: { size: 438346 } 31: { size: 454914 } 32: { size: 428461 } 33: { size: 438091 } 34: { size: 353013 } 35: { size: 340840 } publisher: name: Youthful Adventure Stories prices: GB: 0.05 US: 0.1 protagonist: Captain Science reading_direction: ltr remainders: - crud reprints: - language: es series: sort_name: Capitan Sciencia volume: number: 1 - language: de series: name: Kapitän Wissenschaft review: It wasn't all bad. 
scan_info: Photocopied series: name: Captain Science series_groups: - science comics stories: The Beginning: {} summary: Captain Science's many scientific adventures tagger: comicbox dev tags: a: {} b: {} c: {} teams: Team Scientific Method: {} title: The Beginning universes: Young Adult Silly Universe: designation: 4242 updated_at: 1970-01-01T00:00:00Z volume: issue_count: 7 number: 1950 schema: "https://github.com/ajslater/comicbox/blob/main/schemas/v2.0/comicbox-v2.0.schema.json" ================================================ FILE: tests/files/comicbox.update.yaml ================================================ appID: comicbox dev comicbox: age_rating: Adult alternate_images: - alt0.jxl - alt2.jxl arcs: c: {} d: number: 1 identifiers: comicvine: key: "890" e: number: 3 identifiers: comicvine: key: "456" g: number: 5 bookmark: 8 characters: Captain Science: identifiers: metron: key: "123" collection_title: The Big Omnibus Part 2 country: GB credits: Joe Orlando: roles: Writer: identifiers: key: "890" identifiers: metron: key: "123" Wally Wood: roles: Penciller: {} credit_primaries: Joe Orlando: Writer Wally Wood: Penciller critical_rating: 5 date: cover_date: 1951-12-20 store_date: 1951-11-28 day: 20 month: 12 year: 1951 ext: cbz original_format: Hardcover genres: Science Fiction: identifiers: metron: key: "012" Mystery: {} identifiers: comicvine: key: "145265" metron: key: "999" imprint: name: TestImprint identifiers: metron: key: "123" issue: name: 2.2XXX number: 2.2 suffix: XXX language: fr locations: Mars: identifiers: comicvine: key: "111" manga: Yes monochrome: True notes: Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] urn:comicvine:4000-145265 page_count: 24 publisher: name: Youthful Adventure Stories identifiers: metron: key: "111" prices: GB: 0.25 protagonist: Team Cornish Game Hen reading_direction: rtl remainders: - cruddo reprints: - language: es series: sort_name: Capitan Sciencia volume: number: 1 - language: de series: name: Kapitän Wissenschaft review: Actually unreadable. 

================================================
FILE: tests/files/comicbox.update.yaml
================================================
appID: comicbox dev
comicbox:
  age_rating: Adult
  alternate_images:
    - alt0.jxl
    - alt2.jxl
  arcs:
    c: {}
    d:
      number: 1
      identifiers:
        comicvine:
          key: "890"
    e:
      number: 3
      identifiers:
        comicvine:
          key: "456"
    g:
      number: 5
  bookmark: 8
  characters:
    Captain Science:
      identifiers:
        metron:
          key: "123"
  collection_title: The Big Omnibus Part 2
  country: GB
  credits:
    Joe Orlando:
      roles:
        Writer:
          identifiers:
            key: "890"
      identifiers:
        metron:
          key: "123"
    Wally Wood:
      roles:
        Penciller: {}
  credit_primaries:
    Joe Orlando: Writer
    Wally Wood: Penciller
  critical_rating: 5
  date:
    cover_date: 1951-12-20
    store_date: 1951-11-28
    day: 20
    month: 12
    year: 1951
  ext: cbz
  original_format: Hardcover
  genres:
    Science Fiction:
      identifiers:
        metron:
          key: "012"
    Mystery: {}
  identifiers:
    comicvine:
      key: "145265"
    metron:
      key: "999"
  imprint:
    name: TestImprint
    identifiers:
      metron:
        key: "123"
  issue:
    name: 2.2XXX
    number: 2.2
    suffix: XXX
  language: fr
  locations:
    Mars:
      identifiers:
        comicvine:
          key: "111"
  manga: Yes
  monochrome: True
  notes: Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] urn:comicvine:4000-145265
  page_count: 24
  publisher:
    name: Youthful Adventure Stories
    identifiers:
      metron:
        key: "111"
  prices:
    GB: 0.25
  protagonist: Team Cornish Game Hen
  reading_direction: rtl
  remainders:
    - cruddo
  reprints:
    - language: es
      series:
        sort_name: Capitan Sciencia
      volume:
        number: 1
    - language: de
      series:
        name: Kapitän Wissenschaft
  review: Actually unreadable.
  scan_info: Digital
  series:
    name: Captain Science
    identifiers:
      comicvine:
        key: "333"
  series_groups:
    - adult comics
  stories:
    The Beginning:
      identifiers:
        metron:
          key: "555"
    The End: {}
  summary: Captain Science's many adult adventures
  tagger: comicbox dev
  tags:
    a:
      identifiers:
        metron:
          identifiers:
            key: "334"
    c: {}
  teams:
    Team Scientific Method:
      identifiers:
        metron:
          key: "123"
    Team Cornish Game Hen:
  title: The Beginning; The End
  universes:
    Young Adult Silly Universe:
      designation: 6969
      identifiers:
        metron:
          key: 6969
  updated_at: 1970-01-01T00:00:00Z
  volume:
    issue_count: 1
    number: 1950
schema: "https://github.com/ajslater/comicbox/blob/main/schemas/v2.0/comicbox-v2.0.schema.json"
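
The update fixture overlays new values on the example metadata above: scalars change (`country: US` becomes `GB`, `critical_rating: 10` becomes `5`), arc `f` disappears while `g` appears, and identifiers are attached to previously plain tags. A naive illustration of that kind of recursive overlay, purely as a sketch and not comicbox's or the importer's actual merge algorithm:

```python
def deep_merge(base: dict, update: dict) -> dict:
    """Recursively overlay `update` onto `base` (illustration only)."""
    merged = dict(base)
    for key, value in update.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_merge(merged[key], value)  # recurse into mappings
        else:
            merged[key] = value  # scalars and lists are replaced outright
    return merged
```
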

================================================
FILE: tests/importer/__init__.py
================================================
"""Importer tests."""


================================================
FILE: tests/importer/test_basic.py
================================================
"""Test extract metadata importer."""

import os
import shutil
from abc import ABC
from collections.abc import Mapping
from copy import deepcopy
from datetime import UTC, datetime
from decimal import Decimal
from pathlib import Path
from pprint import pformat, pprint
from threading import Event, Lock
from types import MappingProxyType
from typing import override

from deepdiff import DeepDiff
from django.core.cache import cache
from django.test import TestCase
from django.test.testcases import SerializeMixin
from loguru import logger

from codex.librarian.mp_queue import LIBRARIAN_QUEUE
from codex.librarian.scribe.importer.const import (
    COMIC_FK_FIELD_NAMES,
    COMIC_M2M_FIELD_NAMES,
    CREATE_COMICS,
    CREATE_FKS,
    DELETE_M2MS,
    FIS,
    FTS_CREATE,
    FTS_CREATED_M2MS,
    FTS_EXISTING_M2MS,
    FTS_UPDATE,
    FTS_UPDATED_M2MS,
    LINK_FKS,
    LINK_M2MS,
    QUERY_MODELS,
    TOTAL,
    UPDATE_COMICS,
    UPDATE_FKS,
)
from codex.librarian.scribe.importer.importer import ComicImporter
from codex.librarian.scribe.importer.tasks import ImportTask
from codex.models import (
    AgeRating,
    Character,
    Comic,
    Country,
    Credit,
    CreditPerson,
    CreditRole,
    Folder,
    Genre,
    Identifier,
    IdentifierSource,
    Imprint,
    Language,
    Location,
    OriginalFormat,
    Publisher,
    ScanInfo,
    Series,
    SeriesGroup,
    Story,
    StoryArc,
    StoryArcNumber,
    Tag,
    Tagger,
    Team,
    Universe,
    Volume,
)
from codex.models.comic import ComicFTS
from codex.models.library import Library
from codex.startup import codex_init

TMP_DIR = Path("/tmp") / Path(__file__).stem  # noqa: S108
LIBRARY_PATH = TMP_DIR
FILES_DIR = Path(__file__).parent.parent / "files"
COMIC_PATH = FILES_DIR / "comicbox-2-example.cbz"
PATH = str(LIBRARY_PATH / "test.cbz")
PATH_PARENTS = {(str(Path(PATH).parent),)}
PATH_PARENTS_QUERY = {(str(Path(PATH).parent),): set()}
COMPLEX_FIELD_NAMES = frozenset({"credits", "story_arc_numbers", "identifiers"})

AGGREGATED = MappingProxyType(
    {
        FIS: {},
        QUERY_MODELS: {
            AgeRating: {("Everyone",): set()},
            # alternate_images ignored
            Character: {
                ("Captain Science",): {(None,)},
                ("Boy Empirical",): {(("metron", "character", "345"),)},
            },
            Folder: deepcopy(PATH_PARENTS_QUERY),
            Country: {("US",): set()},
            Credit: {
                ("Joe Orlando", "Writer"): set(),
                ("Wally Wood", "Penciller"): set(),
            },
            CreditPerson: {("Joe Orlando",): {(None,)}, ("Wally Wood",): {(None,)}},
            CreditRole: {("Penciller",): {(None,)}, ("Writer",): {(None,)}},
            # credit_primaries ignored
            Genre: {("Science Fiction",): {(None,)}},
            Language: {("en",): set()},
            Location: {("The Moon",): {(None,)}},
            OriginalFormat: {("Trade Paperback",): set()},
            Tag: {("a",): {(None,)}, ("b",): {(None,)}, ("c",): {(None,)}},
            Tagger: {("comicbox dev",): set()},
            Publisher: {("Youthful Adventure Stories",): {(None,)}},
            Imprint: {("Youthful Adventure Stories", "TestImprint"): {(None,)}},
            ScanInfo: {("Photocopied",): set()},
            Series: {
                ("Youthful Adventure Stories", "TestImprint", "Captain Science"): {
                    (None, None),
                }
            },
            SeriesGroup: {("science comics",): set()},
            Volume: {
                (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    1950,
                    None,
                ): {(7,)},
            },
            Team: {("Team Scientific Method",): {(None,)}},
            IdentifierSource: {("comicvine",): set(), ("metron",): set()},
            Identifier: {
                ("comicvine", "comic", "145269"): {
                    ("https://comicvine.gamespot.com/c/4000-145269/",),
                },
                ("metron", "character", "345"): {
                    ("https://metron.cloud/character/345",)
                },
                ("metron", "storyarc", "123"): {("https://metron.cloud/arc/123",)},
            },
            # prices future
            Story: {("The Beginning",): {(None,)}},
            StoryArc: {
                ("c",): {(None,)},
                ("d",): {(("metron", "storyarc", "123"),)},
                ("e",): {(None,)},
                ("f",): {(None,)},
            },
            StoryArcNumber: {
                ("c", None): set(),
                ("d", 1): set(),
                ("e", 3): set(),
                ("f", 5): set(),
            },
            Universe: {("Young Adult Silly Universe",): {(None, "4242")}},
        },
        CREATE_COMICS: {
            PATH: {
                # bookmark ignore
                # cover_date ignore?
                "collection_title": "The Big Omnibus",
                "critical_rating": Decimal("10.00"),
                "day": 1,
                # ext ignore
                "file_type": "CBZ",
                "issue_number": Decimal("1.2"),
                "issue_suffix": "S",
                "metadata_mtime": datetime(2025, 8, 6, 12, 29, 6, tzinfo=UTC),
                "monochrome": False,
                "month": 11,
                "name": "The Beginning",
                "notes": (
                    "Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] "
                    "urn:comicvine:4000-145269"
                ),
                "page_count": 1,
                "path": PATH,
                "reading_direction": "ltr",
                "review": "It wasn't all bad.",
                # remainders ignore
                "summary": "Captain Science's many scientific adventures",
                # updated_at ignore
                "year": 1950,
            }
        },
        LINK_FKS: {
            PATH: {
                "age_rating": ("Everyone",),
                "country": ("US",),
                "imprint": ("Youthful Adventure Stories", "TestImprint"),
                "language": ("en",),
                "original_format": ("Trade Paperback",),
                "publisher": ("Youthful Adventure Stories",),
                "protagonist": ("Captain Science",),
                "scan_info": ("Photocopied",),
                "series": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                ),
                "tagger": ("comicbox dev",),
                "volume": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    1950,
                    None,
                ),
            }
        },
        LINK_M2MS: {
            PATH: {
                "characters": {("Boy Empirical",), ("Captain Science",)},
                "credits": {("Wally Wood", "Penciller"), ("Joe Orlando", "Writer")},
                "genres": {("Science Fiction",)},
                "folders": deepcopy(PATH_PARENTS),
                "identifiers": {("comicvine", "comic", "145269")},
                "locations": {("The Moon",)},
                # reprints add
                "series_groups": {("science comics",)},
                "stories": {("The Beginning",)},
                "story_arc_numbers": {("c", None), ("d", 1), ("e", 3), ("f", 5)},
                "tags": {("a",), ("b",), ("c",)},
                "teams": {("Team Scientific Method",)},
                "universes": {("Young Adult Silly Universe",)},
            }
        },
    },
)
QUERIED = MappingProxyType(
    {
        CREATE_COMICS: {
            PATH: {
                "collection_title": "The Big Omnibus",
                "critical_rating": Decimal("10.00"),
                "day": 1,
                "file_type": "CBZ",
                "issue_number": Decimal("1.2"),
                "issue_suffix": "S",
                "metadata_mtime": datetime(2025, 8, 6, 12, 29, 6, tzinfo=UTC),
                "monochrome": False,
                "month": 11,
                "name": "The Beginning",
                "notes": (
                    "Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] "
                    "urn:comicvine:4000-145269"
                ),
                "page_count": 1,
                "path": PATH,
                "reading_direction": "ltr",
                "review": "It wasn't all bad.",
                "summary": "Captain Science's many scientific adventures",
                "year": 1950,
            }
        },
        UPDATE_COMICS: {},
        FIS: {},
        CREATE_FKS: {
            AgeRating: {("Everyone",)},
            IdentifierSource: {("metron",), ("comicvine",)},
            Identifier: {
                (
                    "comicvine",
                    "comic",
                    "145269",
                    "https://comicvine.gamespot.com/c/4000-145269/",
                ),
                ("metron", "character", "345", "https://metron.cloud/character/345"),
                ("metron", "storyarc", "123", "https://metron.cloud/arc/123"),
            },
            Location: {("The Moon", None)},
            Character: {
                ("Boy Empirical", ("metron", "character", "345")),
                ("Captain Science", None),
            },
            Country: {("US",)},
            CreditPerson: {("Joe Orlando", None), ("Wally Wood", None)},
            CreditRole: {("Penciller", None), ("Writer", None)},
            Credit: {("Joe Orlando", "Writer"), ("Wally Wood", "Penciller")},
            Genre: {("Science Fiction", None)},
            Language: {("en",)},
            OriginalFormat: {("Trade Paperback",)},
            ScanInfo: {("Photocopied",)},
            SeriesGroup: {("science comics",)},
            Story: {("The Beginning", None)},
            StoryArc: {
                ("c", None),
                ("d", ("metron", "storyarc", "123")),
                ("e", None),
                ("f", None),
            },
            StoryArcNumber: {("c", None), ("d", 1), ("e", 3), ("f", 5)},
            Tag: {("a", None), ("b", None), ("c", None)},
            Tagger: {("comicbox dev",)},
            Team: {("Team Scientific Method", None)},
            Universe: {("Young Adult Silly Universe", None, "4242")},
            Folder: deepcopy(PATH_PARENTS),
            Publisher: {("Youthful Adventure Stories", None)},
            Imprint: {("Youthful Adventure Stories", "TestImprint", None)},
            Series: {
                (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    None,
                    None,
                )
            },
            Volume: {
                (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    1950,
                    None,
                    7,
                )
            },
            TOTAL: 41,
        },
        UPDATE_FKS: {
            TOTAL: 0,
        },
        LINK_FKS: {
            PATH: {
                "age_rating": ("Everyone",),
                "country": ("US",),
                "imprint": ("Youthful Adventure Stories", "TestImprint"),
                "language": ("en",),
                "original_format": ("Trade Paperback",),
                "protagonist": ("Captain Science",),
                "publisher": ("Youthful Adventure Stories",),
                "scan_info": ("Photocopied",),
                "series": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                ),
                "tagger": ("comicbox dev",),
                "volume": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    1950,
                    None,
                ),
            }
        },
        LINK_M2MS: {
            PATH: {
                "characters": {("Boy Empirical",), ("Captain Science",)},
                "credits": {("Joe Orlando", "Writer"), ("Wally Wood", "Penciller")},
                "folders": deepcopy(PATH_PARENTS),
                "genres": {("Science Fiction",)},
                "identifiers": {("comicvine", "comic", "145269")},
                "locations": {("The Moon",)},
                "series_groups": {("science comics",)},
                "stories": {("The Beginning",)},
                "story_arc_numbers": {("c", None), ("d", 1), ("e", 3), ("f", 5)},
                "tags": {("a",), ("b",), ("c",)},
                "teams": {("Team Scientific Method",)},
                "universes": {("Young Adult Silly Universe",)},
            }
        },
        DELETE_M2MS: {},
        FTS_EXISTING_M2MS: {},
    }
)
CREATED_FK = MappingProxyType(
    {
        CREATE_COMICS: {
            PATH: {
                "collection_title": "The Big Omnibus",
                "critical_rating": Decimal("10.00"),
                "day": 1,
                "file_type": "CBZ",
                "issue_number": Decimal("1.2"),
                "issue_suffix": "S",
                "metadata_mtime": datetime(2025, 8, 6, 12, 29, 6, tzinfo=UTC),
                "monochrome": False,
                "month": 11,
                "name": "The Beginning",
                "notes": (
                    "Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] "
                    "urn:comicvine:4000-145269"
                ),
                "page_count": 1,
                "path": PATH,
                "reading_direction": "ltr",
                "review": "It wasn't all bad.",
                "summary": "Captain Science's many scientific adventures",
                "year": 1950,
            }
        },
        UPDATE_COMICS: {},
        FIS: {},
        LINK_FKS: {
            PATH: {
                "age_rating": ("Everyone",),
                "country": ("US",),
                "imprint": ("Youthful Adventure Stories", "TestImprint"),
                "language": ("en",),
                "original_format": ("Trade Paperback",),
                "protagonist": ("Captain Science",),
                "publisher": ("Youthful Adventure Stories",),
                "scan_info": ("Photocopied",),
                "series": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                ),
                "tagger": ("comicbox dev",),
                "volume": (
                    "Youthful Adventure Stories",
                    "TestImprint",
                    "Captain Science",
                    1950,
                    None,
                ),
            }
        },
        LINK_M2MS: {
            PATH: {
                "characters": {("Boy Empirical",), ("Captain Science",)},
                "credits": {("Joe Orlando", "Writer"), ("Wally Wood", "Penciller")},
                "folders": deepcopy(PATH_PARENTS),
                "genres": {("Science Fiction",)},
                "identifiers": {("comicvine", "comic", "145269")},
                "locations": {("The Moon",)},
                "series_groups": {("science comics",)},
                "stories": {("The Beginning",)},
                "story_arc_numbers": {("c", None), ("d", 1), ("e", 3), ("f", 5)},
                "tags": {("a",), ("b",), ("c",)},
                "teams": {("Team Scientific Method",)},
                "universes": {("Young Adult Silly Universe",)},
            }
        },
        DELETE_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATED_M2MS: {},
        FTS_CREATED_M2MS: {"universes": {("Young Adult Silly Universe",): ("4242",)}},
    }
)
CREATED_COMICS = MappingProxyType(
    {
        FIS: {},
        LINK_M2MS: {
            PATH: {
                "characters": {("Boy Empirical",), ("Captain Science",)},
                "credits": {("Joe Orlando", "Writer"), ("Wally Wood", "Penciller")},
                "folders": PATH_PARENTS,
                "genres": {("Science Fiction",)},
                "identifiers": {("comicvine", "comic", "145269")},
                "locations": {("The Moon",)},
                "series_groups": {("science comics",)},
                "stories": {("The Beginning",)},
                "story_arc_numbers": {("c", None), ("d", 1), ("e", 3), ("f", 5)},
                "tags": {("a",), ("b",), ("c",)},
                "teams": {("Team Scientific Method",)},
                "universes": {("Young Adult Silly Universe",)},
            }
        },
        DELETE_M2MS: {},
        FTS_CREATE: {
            1: {
                "age_rating": ("Everyone",),
                "collection_title": ("The Big Omnibus",),
                "country": ("US",),
                "imprint": ("TestImprint",),
                "language": ("en",),
                "name": ("The Beginning",),
                "original_format": ("Trade Paperback",),
                "publisher": ("Youthful Adventure Stories",),
                "review": ("It wasn't all bad.",),
                "scan_info": ("Photocopied",),
                "series": ("Captain Science",),
                "summary": ("Captain Science's many scientific adventures",),
                "tagger": ("comicbox dev",),
            }
        },
        FTS_UPDATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_CREATED_M2MS: {"universes": {("Young Adult Silly Universe",): ("4242",)}},
    }
)
LINKED_COMICS = MappingProxyType(
    {
        FIS: {},
        FTS_CREATE: {
            1: {
                "age_rating": ("Everyone",),
                "characters": ("Boy Empirical", "Captain Science"),
                "collection_title": ("The Big Omnibus",),
                "country": ("US",),
                "credits": ("Joe Orlando", "Wally Wood"),
                "genres": ("Science Fiction",),
                "imprint": ("TestImprint",),
                "language": ("en",),
                "locations": ("The Moon",),
                "name": ("The Beginning",),
                "original_format": ("Trade Paperback",),
                "publisher": ("Youthful Adventure Stories",),
                "review": ("It wasn't all bad.",),
                "scan_info": ("Photocopied",),
                "series": ("Captain Science",),
                "series_groups": ("science comics",),
                "sources": ("comicvine",),
                "stories": ("The Beginning",),
                "story_arcs": ("c", "d", "e", "f"),
                "summary": ("Captain Science's many scientific adventures",),
                "tagger": ("comicbox dev",),
                "tags": ("a", "b", "c"),
                "teams": ("Team Scientific Method",),
                "universes": ("4242", "Young Adult Silly Universe"),
            },
        },
        FTS_EXISTING_M2MS: {},
        FTS_CREATED_M2MS: {"universes": {}},
        FTS_UPDATED_M2MS: {},
    }
)
FAILED_IMPORTS = MappingProxyType(
    {
        FTS_CREATE: {
            1: {
                "age_rating": ("Everyone",),
                "characters": ("Boy Empirical", "Captain Science"),
                "collection_title": ("The Big Omnibus",),
                "country": ("US",),
                "credits": ("Joe Orlando", "Wally Wood"),
                "genres": ("Science Fiction",),
                "imprint": ("TestImprint",),
                "language": ("en",),
                "locations": ("The Moon",),
                "name": ("The Beginning",),
                "original_format": ("Trade Paperback",),
                "publisher": ("Youthful Adventure Stories",),
                "review": ("It wasn't all bad.",),
                "scan_info": ("Photocopied",),
                "series": ("Captain Science",),
                "series_groups": ("science comics",),
                "sources": ("comicvine",),
                "stories": ("The Beginning",),
                "story_arcs": ("c", "d", "e", "f"),
                "summary": ("Captain Science's many scientific adventures",),
                "tagger": ("comicbox dev",),
                "tags": ("a", "b", "c"),
                "teams": ("Team Scientific Method",),
                "universes": ("4242", "Young Adult Silly Universe"),
            },
        },
        FTS_EXISTING_M2MS: {},
        FTS_UPDATED_M2MS: {},
        FTS_CREATED_M2MS: {"universes": {}},
    }
)
DELETED_COMICS = MappingProxyType(
    {
        FTS_CREATE: {
            1: {
                "age_rating": ("Everyone",),
                "characters": ("Boy Empirical", "Captain Science"),
                "collection_title": ("The Big Omnibus",),
                "country": ("US",),
                "credits": ("Joe Orlando", "Wally Wood"),
                "genres": ("Science Fiction",),
                "imprint": ("TestImprint",),
                "language": ("en",),
                "locations": ("The Moon",),
                "name": ("The Beginning",),
                "original_format": ("Trade Paperback",),
                "publisher": ("Youthful Adventure Stories",),
                "review": ("It wasn't all bad.",),
                "scan_info": ("Photocopied",),
                "series": ("Captain Science",),
                "series_groups": ("science comics",),
                "sources": ("comicvine",),
                "stories": ("The Beginning",),
                "story_arcs": ("c", "d", "e", "f"),
                "summary": ("Captain Science's many scientific adventures",),
                "tagger": ("comicbox dev",),
                "tags": ("a", "b", "c"),
                "teams": ("Team Scientific Method",),
                "universes": ("4242", "Young Adult Silly Universe"),
            },
        },
        FTS_EXISTING_M2MS: {},
        FTS_UPDATED_M2MS: {},
        FTS_CREATED_M2MS: {"universes": {}},
    }
)
FTSED = MappingProxyType({})
_FK_VALUE_POS = MappingProxyType(
    {
        "volume": -2,
    }
)
_COMPLEX_KEYS = frozenset({"credits", "identifiers", "story_arc_numbers"})
_COMICFTS_IGNORE_KEYS = ("comic_id", "updated_at", "created_at")


def create_fts_strings(md, pk):
    """Create the fts values for comparison from the md."""
    fts_md = {}
    for key in (FTS_CREATE, FTS_UPDATE):
        for field_name, values in md.get(key, {}).get(pk, {}).items():
            all_values = values + md[FTS_EXISTING_M2MS].get(pk, {}).get(field_name, ())
            fts_md[field_name] = ",".join(sorted(all_values))
    return fts_md


FTS_FINAL_BASIC = MappingProxyType(
    {
        **create_fts_strings(LINKED_COMICS, 1),
        "country": "US,United States",
        "language": "en,English",
        "sources": "Comic Vine,comicvine",
    }
)


def create_compare_comic_values(agg_md):
    """Create the comic values for comparison from the aggregate md."""
    comic = {**agg_md[CREATE_COMICS][PATH]}
    for key, values in agg_md[LINK_FKS][PATH].items():
        pos = _FK_VALUE_POS.get(key, -1)
        comic[key] = values[pos]
    for key, values in agg_md[LINK_M2MS][PATH].items():
        if key in _COMPLEX_KEYS:
            final_val = tuple(sorted(values))
        elif key == "folders":
            final_val = tuple(sorted(Path(val[0]).name for val in values))
        else:
            final_val = tuple(sorted(val[0] for val in values))
        comic[key] = final_val
    comic.pop("protagonist", None)
    return MappingProxyType(comic)


COMIC_VALUES_BASIC = create_compare_comic_values(AGGREGATED)


def write_out(old_md, new_md):
    """Write out formatted dicts for diffing."""
    Path("old.py").write_text(pformat(old_md))
    Path("new.py").write_text(pformat(new_md))


def diff_assert(old_md: Mapping, new_md: Mapping, phase: str):
    """Assert a deep diff."""
    if diff := DeepDiff(old_md, new_md):
        print("Test Phase:", phase)  # noqa: T201
        pprint(diff)
        pprint(new_md)
        if os.environ.get("CODEX_TEST_IMPORT_WRITE"):
            write_out(old_md, new_md)
    assert not diff

def _test_comic_creation_field_protagonist(comic, field_name, test_value):
    diff = (comic.main_character and test_value == comic.main_character.name) or (
        comic.main_team and test_value == comic.main_team.name
    )
    if not diff:
        print(  # noqa: T201
            f"{field_name}:{test_value} == {comic.main_character=} or {comic.main_team=}"
        )
    assert diff


def _test_comic_creation_field_complex(field_name: str, value):
    if field_name == "credits":
        value = tuple(
            sorted((subval.person.name, subval.role.name) for subval in value.all())
        )
    elif field_name == "story_arc_numbers":
        value = tuple(
            sorted((subval.story_arc.name, subval.number) for subval in value.all())
        )
    elif field_name == "identifiers":
        value = tuple(
            sorted(
                (subval.source.name, subval.id_type, subval.key)
                for subval in value.all()
            )
        )
    return value


def _test_comic_creation_field(comic, field_name, test_value):
    if field_name == "protagonist":
        _test_comic_creation_field_protagonist(comic, field_name, test_value)
        return
    value = getattr(comic, field_name)
    if field_name in COMIC_FK_FIELD_NAMES:
        value = value.name
    elif field_name in COMPLEX_FIELD_NAMES:
        value = _test_comic_creation_field_complex(field_name, value)
    elif field_name in COMIC_M2M_FIELD_NAMES:
        value = tuple(sorted(subval.name for subval in value.all()))
    diff = test_value == value
    if not diff:
        print(f"{field_name} {test_value=} {value=}")  # noqa: T201
    assert diff


def export_test_comic_creation(values_const: MappingProxyType):
    """Test Comic Creation and Linking."""
    qs = Comic.objects.prefetch_related(*COMIC_M2M_FIELD_NAMES).select_related(
        *COMIC_FK_FIELD_NAMES, "main_team", "main_character"
    )
    comic = qs.get(path=PATH)
    # values = qs.filter(path=PATH).values().first() debug
    # ic(values) debug
    for field_name, test_value in values_const.items():
        _test_comic_creation_field(comic, field_name, test_value)
    return comic


def export_test_fts_creation(values_const: MappingProxyType, comic: Comic):
    """FTS Values."""
    qs = ComicFTS.objects.filter(comic_id=comic.pk).values()
    comicfts = qs[0]
    fts_values = deepcopy(dict(values_const))
    for key in _COMICFTS_IGNORE_KEYS:
        comicfts.pop(key)
    diff_assert(fts_values, comicfts, "COMIC_FTS")


class BaseTestImporter(SerializeMixin, TestCase, ABC):
    lockfile = __file__

    @override
    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cache.clear()
        codex_init()
        TMP_DIR.mkdir(exist_ok=True)
        shutil.copy(COMIC_PATH, PATH)

    @override
    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(TMP_DIR)

    @override
    def setUp(self):
        cache.clear()
        Library.objects.create(path=LIBRARY_PATH)
        pk = Library.objects.get(path=LIBRARY_PATH).pk
        self.task = ImportTask(library_id=pk, files_modified=frozenset({PATH}))
        self.importer = ComicImporter(
            self.task, logger, LIBRARIAN_QUEUE, Lock(), Event()
        )

    @override
    def tearDown(self):
        super().tearDown()
        cache.clear()


class TestImporterBasic(BaseTestImporter):
    def test_import(self):
        # Extract and Aggregate
        self.importer.read()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(AGGREGATED, md, "AGGREGATED")

        # Query
        self.importer.query()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(QUERIED, md, "QUERIED")

        # Create Fks
        self.importer.create_all_fks()
        self.importer.update_all_fks()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(CREATED_FK, md, "CREATED_FK")
        assert Identifier.objects.count() == 3  # noqa: PLR2004

        # Create Comics
        self.importer.update_comics()
        self.importer.create_comics()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(CREATED_COMICS, md, "CREATED_COMICS")
        comic = Comic.objects.get(path=PATH)
        assert comic
        assert comic.year == 1950  # noqa: PLR2004

        # Link
        self.importer.link()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(LINKED_COMICS, md, "LINKED_COMICS")

        # Fail imports
        self.importer.fail_imports()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(FAILED_IMPORTS, md, "FAILED_IMPORTS")

        # Delete
        self.importer.delete()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(DELETED_COMICS, md, "DELETED_COMICS")
        comic = export_test_comic_creation(COMIC_VALUES_BASIC)

        # FTS
        self.importer.full_text_search()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(FTSED, md, "FTSED")
        export_test_fts_creation(FTS_FINAL_BASIC, comic)
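
For orientation, the phase order that `test_import` above walks through, condensed into one sketch. The method names come straight from the test; the `task`, `logger`, `LIBRARIAN_QUEUE`, `Lock()`, and `Event()` arguments are the same ones wired up in `BaseTestImporter.setUp()`:

```python
importer = ComicImporter(task, logger, LIBRARIAN_QUEUE, Lock(), Event())
importer.read()              # extract and aggregate metadata from archives
importer.query()             # diff aggregated metadata against the database
importer.create_all_fks()    # create missing foreign-key rows
importer.update_all_fks()    # update changed foreign-key rows
importer.update_comics()     # update existing comic rows
importer.create_comics()     # create new comic rows
importer.link()              # link fk and m2m fields to comics
importer.fail_imports()      # record files that failed to import
importer.delete()            # remove rows for deleted files
importer.full_text_search()  # sync the ComicFTS table
```
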
{("comicvine",): set(), ("metron",): set()}, Identifier: { ("comicvine", "comic", "145265"): { ("https://comicvine.gamespot.com/c/4000-145265/",), }, ("comicvine", "location", "111"): { ("https://comicvine.gamespot.com/c/4020-111/",), }, ("comicvine", "series", "333"): { ("https://comicvine.gamespot.com/c/4050-333/",), }, ("comicvine", "storyarc", "456"): { ("https://comicvine.gamespot.com/c/4045-456/",), }, ("comicvine", "storyarc", "890"): { ("https://comicvine.gamespot.com/c/4045-890/",), }, ( "metron", "character", "123", ): { ("https://metron.cloud/character/123",), }, ("metron", "comic", "999"): { ("https://metron.cloud/issue/999",), }, ("metron", "creditperson", "123"): { ("https://metron.cloud/creator/123",), }, ("metron", "genre", "012"): { ("https://metron.cloud/genre/012",), }, ("metron", "imprint", "123"): { ("https://metron.cloud/imprint/123",), }, ("metron", "publisher", "111"): { ("https://metron.cloud/publisher/111",), }, ("metron", "story", "555"): { ("https://metron.cloud/story/555",), }, }, Publisher: { ("Youthful Adventure Stories",): { (("metron", "publisher", "111"),), } }, Imprint: { ("Youthful Adventure Stories", "TestImprint"): { (("metron", "imprint", "123"),), } }, Series: { ("Youthful Adventure Stories", "TestImprint", "Captain Science"): { ( ("comicvine", "series", "333"), None, ), } }, Language: {("fr",): set()}, Volume: { ( "Youthful Adventure Stories", "TestImprint", "Captain Science", 1950, None, ): { (1,), } }, Folder: deepcopy(PATH_PARENTS_QUERY), AgeRating: {("Adult",): set()}, Character: { ("Captain Science",): { (("metron", "character", "123"),), } }, CreditPerson: { ("Joe Orlando",): { (("metron", "creditperson", "123"),), }, ("Wally Wood",): { (None,), }, }, CreditRole: { ("Penciller",): { (None,), }, ("Writer",): { (None,), }, }, Credit: { ("Joe Orlando", "Writer"): set(), ("Wally Wood", "Penciller"): set(), }, Country: {("GB",): set()}, Genre: { ("Mystery",): { (None,), }, ("Science Fiction",): { (("metron", "genre", "012"),), }, }, Location: { ("Mars",): { (("comicvine", "location", "111"),), } }, OriginalFormat: {("Hardcover",): set()}, ScanInfo: {("Digital",): set()}, SeriesGroup: {("adult comics",): set()}, Story: { ("The Beginning",): { (("metron", "story", "555"),), }, ("The End",): { (None,), }, }, StoryArc: { ("c",): { (None,), }, ("d",): { (("comicvine", "storyarc", "890"),), }, ("e",): { (("comicvine", "storyarc", "456"),), }, ("g",): { (None,), }, }, StoryArcNumber: { ("c", None): set(), ("d", 1): set(), ("e", 3): set(), ("g", 5): set(), }, Tag: { ("a",): { (None,), }, ("c",): { (None,), }, }, Tagger: {("comicbox dev",): set()}, Universe: { ("Young Adult Silly Universe",): { (None, "6969"), } }, }, } ) QUERIED_UPDATE_ALL = MappingProxyType( { CREATE_COMICS: {}, CREATE_FKS: { Identifier: { ( "comicvine", "comic", "145265", "https://comicvine.gamespot.com/c/4000-145265/", ), ( "comicvine", "location", "111", "https://comicvine.gamespot.com/c/4020-111/", ), ( "comicvine", "series", "333", "https://comicvine.gamespot.com/c/4050-333/", ), ( "comicvine", "storyarc", "456", "https://comicvine.gamespot.com/c/4045-456/", ), ( "comicvine", "storyarc", "890", "https://comicvine.gamespot.com/c/4045-890/", ), ("metron", "character", "123", "https://metron.cloud/character/123"), ("metron", "comic", "999", "https://metron.cloud/issue/999"), ("metron", "creditperson", "123", "https://metron.cloud/creator/123"), ("metron", "genre", "012", "https://metron.cloud/genre/012"), ("metron", "imprint", "123", "https://metron.cloud/imprint/123"), ("metron", 
"publisher", "111", "https://metron.cloud/publisher/111"), ("metron", "story", "555", "https://metron.cloud/story/555"), }, Language: {("fr",)}, AgeRating: {("Adult",)}, Country: {("GB",)}, Genre: {("Mystery", None)}, Location: {("Mars", ("comicvine", "location", "111"))}, OriginalFormat: {("Hardcover",)}, ScanInfo: {("Digital",)}, SeriesGroup: {("adult comics",)}, Story: {("The End", None)}, StoryArc: {("g", None)}, StoryArcNumber: {("g", 5)}, "total": 23, }, DELETE_M2MS: { "characters": {(1, 1)}, "identifiers": {(1, 1)}, "locations": {(1, 1)}, "series_groups": {(1, 1)}, "story_arc_numbers": {(1, 4)}, "tags": {(1, 2)}, }, FIS: {}, LINK_FKS: { PATH: { "age_rating": ("Adult",), "country": ("GB",), "language": ("fr",), "original_format": ("Hardcover",), "protagonist": ("Team Cornish Game Hen",), "scan_info": ("Digital",), } }, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, LINK_M2MS: { PATH: { "genres": { ("Mystery",), }, "identifiers": { ("metron", "comic", "999"), ("comicvine", "comic", "145265"), }, "locations": {("Mars",)}, "series_groups": {("adult comics",)}, "stories": {("The End",)}, "story_arc_numbers": {("g", 5)}, } }, UPDATE_COMICS: { 1: { "collection_title": "The Big Omnibus Part 2", "critical_rating": Decimal("5.00"), "day": 20, "issue_number": Decimal("2.2"), "issue_suffix": "XXX", "metadata_mtime": datetime(2025, 8, 6, 12, 37, 6, tzinfo=UTC), "monochrome": True, "month": 12, "name": "The Beginning; The End", "notes": ( "Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] " "urn:comicvine:4000-145265" ), "page_count": 0, "reading_direction": "rtl", "review": "Actually unreadable.", "summary": "Captain Science's many adult adventures", "year": 1951, } }, UPDATE_FKS: { Publisher: {("Youthful Adventure Stories", ("metron", "publisher", "111"))}, Imprint: { ( "Youthful Adventure Stories", "TestImprint", ("metron", "imprint", "123"), ) }, Series: { ( "Youthful Adventure Stories", "TestImprint", "Captain Science", ("comicvine", "series", "333"), None, ) }, Character: {("Captain Science", ("metron", "character", "123"))}, CreditPerson: {("Joe Orlando", ("metron", "creditperson", "123"))}, Genre: {("Science Fiction", ("metron", "genre", "012"))}, Story: {("The Beginning", ("metron", "story", "555"))}, StoryArc: { ("d", ("comicvine", "storyarc", "890")), ("e", ("comicvine", "storyarc", "456")), }, Universe: {("Young Adult Silly Universe", None, "6969")}, Volume: { ( "Youthful Adventure Stories", "TestImprint", "Captain Science", 1950, None, 1, ) }, TOTAL: 11, }, } ) CREATED_FK_UPDATE_ALL = MappingProxyType( { CREATE_COMICS: {}, DELETE_M2MS: { "characters": {(1, 1)}, "identifiers": {(1, 1)}, "locations": {(1, 1)}, "series_groups": {(1, 1)}, "story_arc_numbers": {(1, 4)}, "tags": {(1, 2)}, }, FIS: {}, LINK_FKS: { PATH: { "age_rating": ("Adult",), "country": ("GB",), "language": ("fr",), "original_format": ("Hardcover",), "protagonist": ("Team Cornish Game Hen",), "scan_info": ("Digital",), } }, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, LINK_M2MS: { PATH: { "genres": { ("Mystery",), }, "identifiers": { ("comicvine", "comic", "145265"), ("metron", "comic", "999"), }, "locations": {("Mars",)}, "series_groups": {("adult comics",)}, "stories": {("The End",)}, "story_arc_numbers": {("g", 5)}, } }, UPDATE_COMICS: 
{ 1: { "collection_title": "The Big Omnibus Part 2", "critical_rating": Decimal("5.00"), "day": 20, "issue_number": Decimal("2.2"), "issue_suffix": "XXX", "metadata_mtime": datetime(2025, 8, 6, 12, 37, 6, tzinfo=UTC), "monochrome": True, "month": 12, "name": "The Beginning; The End", "notes": ( "Tagged with comicbox dev on 1970-01-01T00:00:00 [Issue ID 145269] " "urn:comicvine:4000-145265" ), "page_count": 0, "reading_direction": "rtl", "review": "Actually unreadable.", "summary": "Captain Science's many adult adventures", "year": 1951, } }, FTS_UPDATED_M2MS: {"universes": {1}}, FTS_CREATED_M2MS: {}, } ) CREATED_COMICS_UPDATE_ALL = MappingProxyType( { DELETE_M2MS: { "characters": {(1, 1)}, "identifiers": {(1, 1)}, "locations": {(1, 1)}, "series_groups": {(1, 1)}, "story_arc_numbers": {(1, 4)}, "tags": {(1, 2)}, }, FIS: {}, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, LINK_M2MS: { PATH: { "genres": { ("Mystery",), }, "identifiers": { ("comicvine", "comic", "145265"), ("metron", "comic", "999"), }, "locations": {("Mars",)}, "series_groups": {("adult comics",)}, "stories": {("The End",)}, "story_arc_numbers": {("g", 5)}, } }, FTS_UPDATE: { 1: { "age_rating": ("Adult",), "collection_title": ("The Big Omnibus Part 2",), "country": ("GB",), "language": ("fr",), "name": ("The Beginning; The End",), "original_format": ("Hardcover",), "review": ("Actually unreadable.",), "scan_info": ("Digital",), "summary": ("Captain Science's many adult adventures",), } }, FTS_UPDATED_M2MS: {"universes": {1}}, FTS_CREATED_M2MS: {}, } ) LINKED_COMICS_UPDATE_ALL = MappingProxyType( { FIS: {}, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, FTS_UPDATE: { 1: { "age_rating": ("Adult",), "collection_title": ("The Big Omnibus Part 2",), "country": ("GB",), "genres": ("Mystery",), "language": ("fr",), "locations": ("Mars",), "name": ("The Beginning; The End",), "original_format": ("Hardcover",), "review": ("Actually unreadable.",), "scan_info": ("Digital",), "series_groups": ("adult comics",), "sources": ("comicvine", "metron"), "stories": ("The End",), "story_arcs": ("g",), "summary": ("Captain Science's many adult adventures",), } }, FTS_UPDATED_M2MS: {"universes": {1}}, FTS_CREATED_M2MS: {}, } ) FAILED_IMPORTS_UPDATE_ALL = MappingProxyType( { FTS_CREATED_M2MS: {}, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, FTS_UPDATE: { 1: { "age_rating": ("Adult",), "collection_title": ("The Big Omnibus Part 2",), "country": ("GB",), "genres": ("Mystery",), "language": ("fr",), "locations": ("Mars",), "name": ("The Beginning; The End",), "original_format": ("Hardcover",), "review": ("Actually unreadable.",), "scan_info": ("Digital",), "series_groups": ("adult comics",), "sources": ("comicvine", "metron"), "stories": ("The End",), "story_arcs": ("g",), "summary": ("Captain Science's many adult adventures",), } }, FTS_UPDATED_M2MS: {"universes": {1}}, } ) DELETED_COMICS_UPDATE_ALL = MappingProxyType( { FTS_CREATED_M2MS: {}, FTS_EXISTING_M2MS: { 1: { "characters": ("Captain Science",), "genres": ("Science Fiction",), "stories": ("The Beginning",), "story_arcs": ("c", "d", "e"), "tags": ("a", "c"), } }, FTS_UPDATE: { 1: { "age_rating": 
("Adult",), "collection_title": ("The Big Omnibus Part 2",), "country": ("GB",), "genres": ("Mystery",), "language": ("fr",), "locations": ("Mars",), "name": ("The Beginning; The End",), "original_format": ("Hardcover",), "review": ("Actually unreadable.",), "scan_info": ("Digital",), "series_groups": ("adult comics",), "sources": ("comicvine", "metron"), "stories": ("The End",), "story_arcs": ("g",), "summary": ("Captain Science's many adult adventures",), } }, FTS_UPDATED_M2MS: {"universes": {1}}, } ) FTSED_UPDATE_ALL = MappingProxyType({}) _FK_VALUE_POS = MappingProxyType( { "volume": -2, } ) _COMPLEX_KEYS = frozenset({"credits", "identifiers", "story_arc_numbers"}) _COMICFTS_IGNORE_KEYS = ("comic_id", "updated_at", "created_at") FTS_FINAL_UPDATE_ALL = MappingProxyType( { **FTS_FINAL_BASIC, **create_fts_strings(LINKED_COMICS_UPDATE_ALL, 1), "country": "GB,United Kingdom", "language": "fr,French", "sources": "Comic Vine,Metron,comicvine,metron", "universes": "6969,Young Adult Silly Universe", "characters": "Captain Science", "tags": "a,c", } ) _COMIC_VALUES_UPDATE_ALL = create_compare_comic_values(AGGREGATED_UPDATE_ALL) COMIC_VALUES_UPDATE_ALL = MappingProxyType( {**COMIC_VALUES_BASIC, **_COMIC_VALUES_UPDATE_ALL} ) class TestImporterUpdateAll(BaseTestImporterUpdate): @override def setUp(self): super().setUp() shutil.copy(UPDATE_PATH, PATH) def test_update_all(self): self.importer.read() md = MappingProxyType(self.importer.metadata) diff_assert(AGGREGATED_UPDATE_ALL, md, "AGGREGATED_UPDATE_ALL") # Query self.importer.query() md = MappingProxyType(self.importer.metadata) diff_assert(QUERIED_UPDATE_ALL, md, "QUERIED_UPDATE_ALL") # Create & Update Fks self.importer.create_all_fks() self.importer.update_all_fks() md = MappingProxyType(self.importer.metadata) diff_assert(CREATED_FK_UPDATE_ALL, md, "CREATED_FK_UPDATE_ALL") assert Identifier.objects.count() == 15 # noqa: PLR2004 # Create & Update Comics self.importer.update_comics() self.importer.create_comics() md = MappingProxyType(self.importer.metadata) diff_assert(CREATED_COMICS_UPDATE_ALL, md, "CREATED_COMICS_UPDATE_ALL") comic = Comic.objects.get(path=PATH) assert comic # Link self.importer.link_comic_m2m_fields() md = MappingProxyType(self.importer.metadata) diff_assert(LINKED_COMICS_UPDATE_ALL, md, "LINKED_COMICS_UPDATE_ALL") comic = export_test_comic_creation(COMIC_VALUES_UPDATE_ALL) # Fail imports self.importer.fail_imports() md = MappingProxyType(self.importer.metadata) diff_assert(FAILED_IMPORTS_UPDATE_ALL, md, "FAILED_IMPORTS_UPDATE_ALL") # Delete self.importer.delete() md = MappingProxyType(self.importer.metadata) diff_assert(DELETED_COMICS_UPDATE_ALL, md, "DELETED_COMICS_UPDATE_ALL") # FTS self.importer.full_text_search() md = MappingProxyType(self.importer.metadata) diff_assert(FTSED_UPDATE_ALL, md, "FTSED_UPDATE_ALL") export_test_fts_creation(FTS_FINAL_UPDATE_ALL, comic) ================================================ FILE: tests/importer/test_update_none.py ================================================ """Test extract metadata importer.""" from abc import ABC from copy import deepcopy from threading import Event, Lock from types import MappingProxyType from typing import override from loguru import logger from codex.librarian.mp_queue import LIBRARIAN_QUEUE from codex.librarian.scribe.importer.const import ( CREATE_COMICS, CREATE_FKS, DELETE_M2MS, FIS, FTS_CREATED_M2MS, FTS_EXISTING_M2MS, FTS_UPDATE, FTS_UPDATED_M2MS, LINK_FKS, LINK_M2MS, TOTAL, UPDATE_COMICS, UPDATE_FKS, ) from codex.librarian.scribe.importer.importer 

================================================
FILE: tests/importer/test_update_none.py
================================================
"""Test extract metadata importer."""

from abc import ABC
from copy import deepcopy
from threading import Event, Lock
from types import MappingProxyType
from typing import override

from loguru import logger

from codex.librarian.mp_queue import LIBRARIAN_QUEUE
from codex.librarian.scribe.importer.const import (
    CREATE_COMICS,
    CREATE_FKS,
    DELETE_M2MS,
    FIS,
    FTS_CREATED_M2MS,
    FTS_EXISTING_M2MS,
    FTS_UPDATE,
    FTS_UPDATED_M2MS,
    LINK_FKS,
    LINK_M2MS,
    TOTAL,
    UPDATE_COMICS,
    UPDATE_FKS,
)
from codex.librarian.scribe.importer.importer import ComicImporter
from codex.models import Comic, Identifier
from tests.importer.test_basic import (
    AGGREGATED,
    COMIC_VALUES_BASIC,
    FILES_DIR,
    FTS_FINAL_BASIC,
    PATH,
    QUERIED,
    BaseTestImporter,
    diff_assert,
    export_test_comic_creation,
    export_test_fts_creation,
)

UPDATE_PATH = FILES_DIR / "comicbox-2-update.cbz"

QUERIED_NONE = MappingProxyType(
    {
        CREATE_COMICS: {},
        UPDATE_COMICS: {1: {}},
        FIS: {},
        CREATE_FKS: {TOTAL: 0},
        UPDATE_FKS: {TOTAL: 0},
        LINK_FKS: {},
        LINK_M2MS: {},
        DELETE_M2MS: {},
        FTS_EXISTING_M2MS: {},
    }
)
CREATED_FK_UPDATE_NONE = MappingProxyType(
    {
        CREATE_COMICS: {},
        DELETE_M2MS: {},
        FIS: {},
        FTS_CREATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATED_M2MS: {},
        LINK_FKS: {},
        LINK_M2MS: {},
        UPDATE_COMICS: {1: {}},
    }
)
CREATED_COMICS_UPDATE_NONE = MappingProxyType(
    {
        DELETE_M2MS: {},
        FIS: {},
        FTS_CREATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATE: {1: {}},
        FTS_UPDATED_M2MS: {},
        LINK_M2MS: {},
    }
)
LINKED_COMICS_UPDATE_NONE = MappingProxyType(
    {
        FIS: {},
        FTS_CREATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATE: {1: {}},
        FTS_UPDATED_M2MS: {},
    }
)
FAILED_IMPORTS_UPDATE_NONE = MappingProxyType(
    {
        FTS_CREATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATE: {1: {}},
        FTS_UPDATED_M2MS: {},
    }
)
DELETED_COMICS_UPDATE_NONE = MappingProxyType(
    {
        FTS_CREATED_M2MS: {},
        FTS_EXISTING_M2MS: {},
        FTS_UPDATE: {1: {}},
        FTS_UPDATED_M2MS: {},
    }
)
FTSED_UPDATE_NONE = MappingProxyType({})


class BaseTestImporterUpdate(BaseTestImporter, ABC):
    @override
    def setUp(self):
        super().setUp()
        importer = ComicImporter(self.task, logger, LIBRARIAN_QUEUE, Lock(), Event())
        importer.metadata = deepcopy(dict(QUERIED))
        importer.create_and_update()
        importer.link()
        comic = export_test_comic_creation(COMIC_VALUES_BASIC)
        importer.full_text_search()
        export_test_fts_creation(FTS_FINAL_BASIC, comic)


class TestImporterUpdateNone(BaseTestImporterUpdate):
    def test_update_none(self):
        # Query
        self.importer.metadata = deepcopy(dict(AGGREGATED))
        self.importer.query()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(QUERIED_NONE, md, "QUERIED_NONE")

        # Create & Update Fks
        self.importer.create_all_fks()
        self.importer.update_all_fks()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(CREATED_FK_UPDATE_NONE, md, "CREATED_FK_UPDATE_NONE")
        assert Identifier.objects.count() == 3  # noqa: PLR2004

        # Create & Update Comics
        self.importer.update_comics()
        self.importer.create_comics()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(CREATED_COMICS_UPDATE_NONE, md, "CREATED_COMICS_UPDATE_NONE")
        comic = Comic.objects.get(path=PATH)
        assert comic

        # Link
        self.importer.link_comic_m2m_fields()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(LINKED_COMICS_UPDATE_NONE, md, "LINKED_COMICS_UPDATE_NONE")
        comic = export_test_comic_creation(COMIC_VALUES_BASIC)

        # Fail imports
        self.importer.fail_imports()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(FAILED_IMPORTS_UPDATE_NONE, md, "FAILED_IMPORTS_UPDATE_NONE")

        # Delete
        self.importer.delete()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(DELETED_COMICS_UPDATE_NONE, md, "DELETED_COMICS_UPDATE_NONE")

        # FTS
        self.importer.full_text_search()
        md = MappingProxyType(self.importer.metadata)
        diff_assert(FTSED_UPDATE_NONE, md, "FTSED_UPDATE_NONE")
        export_test_fts_creation(FTS_FINAL_BASIC, comic)

================================================
FILE: tests/nginx-local-codex.conf
================================================
upstream host_service {
    server localhost:9810;
}

server {
    listen 80;
    charset utf-8;

    add_header X-Frame-Options SAMEORIGIN;

    access_log /dev/stdout;
    error_log /dev/stderr;

    # proxies
    # Docs for using variables to force name re-resolution when upstream containers are re-created.
    # https://tenzer.dk/nginx-with-dynamic-upstreams/
    # proxy_buffering off;
    # proxy_buffers 8 64k;
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Host $server_name;
    proxy_set_header X-Forwarded-Port $server_port;
    proxy_set_header X-Forwarded-Proto $scheme;
    # proxy_set_header X-Forwarded-Ssl on;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Scheme $scheme;

    # WS
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "Upgrade";

    gzip_comp_level 6;
    gzip_proxied any;
    gzip_types text/plain text/css text/js text/xml text/javascript application/javascript application/json application/xml image/svg+xml;

    set $codex_upstream http://host_service;

    location /codex {
        proxy_pass $codex_upstream;
    }
}


================================================
FILE: tests/test_asgi.py
================================================
"""Test the asgi server."""

from django.test import TestCase

from codex.asgi import application


class EnvironTestCase(TestCase):
    """Test environment variables."""

    def receive(self):
        """Do nothing."""

    def send(self):
        """Do nothing."""

    async def test_application(self):
        """Don't even test application, yet."""
        assert application


================================================
FILE: tests/test_models.py
================================================
"""Test models."""

import datetime
import shutil
from pathlib import Path
from typing import override

from django.test import TestCase

from codex.models import Comic, Imprint, Library, Publisher, Series, Volume

TMP_DIR = Path("/tmp/codex.tests")  # noqa: S108


class ComicTestCase(TestCase):
    """Test Comic model."""

    COMIC_PATH = TMP_DIR / "foo.cbz"
    NAME = "foo"
    DECADE = 1970
    YEAR = 1975
    MONTH = 4
    DAY = 9
    DATE = datetime.date(YEAR, MONTH, DAY)

    @override
    def setUp(self):
        """Set up for tests."""
        TMP_DIR.mkdir(exist_ok=True, parents=True)
        self.COMIC_PATH.touch()
        library = Library.objects.create(path=str(self.COMIC_PATH))
        publisher = Publisher.objects.create(name="FooPub")
        imprint = Imprint.objects.create(name="BarComics", publisher=publisher)
        series = Series.objects.create(
            name="Baz Patrol", imprint=imprint, publisher=publisher
        )
        volume = Volume.objects.create(
            name="2020", series=series, imprint=imprint, publisher=publisher
        )
        Comic.objects.create(
            library=library,
            path=self.COMIC_PATH,
            issue_number=1,
            name=ComicTestCase.NAME,
            publisher=publisher,
            imprint=imprint,
            series=series,
            volume=volume,
            size=100,
            year=self.YEAR,
            month=self.MONTH,
            day=self.DAY,
        )

    @override
    def tearDown(self):
        """Tear down tests."""
        shutil.rmtree(TMP_DIR)

    def test_comic_save(self):
        """Test comic model save method."""
        comic = Comic.objects.get(path=self.COMIC_PATH)
        assert comic.name == self.NAME
        assert comic.decade == self.DECADE
        assert comic.date == self.DATE


================================================
FILE: vulture_ignorelist.py
================================================
format_suffix  # unused variable (codex/views/reader/page.py:41)
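
ComicTestCase in tests/test_models.py above asserts two derived fields on save: `decade == 1970` for `year == 1975`, and `date == datetime.date(1975, 4, 9)` from the year/month/day columns. A hypothetical sketch of that derivation, assuming Comic.save() computes it roughly this way (the model's actual implementation is not shown in this extract):

```python
import datetime


def derive_comic_fields(year: int, month: int, day: int) -> tuple[int, datetime.date]:
    """Hypothetical sketch of the derivation ComicTestCase asserts."""
    decade = year - (year % 10)  # 1975 -> 1970
    date = datetime.date(year, month, day)  # 1975-04-09
    return decade, date
```
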