[
  {
    "path": ".flake8",
    "content": "[flake8]\nmax-line-length = 120\nextend-ignore = E203\nper-file-ignores = __init__.py:F401\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/ask_a_question.md",
    "content": "---\nname: Ask a Question\nabout: Ask a question about using dot\nlabels: question\n\n---\n\n## :question: Ask a Question:\n\n### Description:\n\n<!-- A clear and concise description of your question. Ex. what is/how to [...] -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "content": "---\nname: Bug Report\nabout: Report bugs to improve dot\nlabels: bug\n\n---\n\n## :bug: Bug Report\n\n<!-- Note: Remove sections from the template that are not relevant to the issue. -->\n\n### Description:\n\n#### Actual Behavior:\n\n<!-- A clear and concise description of what the bug is, including steps for reproducing it. -->\n\n#### Expected Behavior:\n\n<!-- A clear and concise description of what you expected to happen. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/documentation.md",
    "content": "---\nname: Documentation\nabout: Report an issue related to dot documentation\nlabels: documentation\n\n---\n\n## :memo: Documentation\n\n<!-- Note: Remove sections from the template that are not relevant to the issue. -->\n\n### Description:\n\n<!-- A clear and concise description of what needs to be added, updated or removed from current documentation. -->\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "content": "---\nname: Feature Request\nabout: Submit a feature request for dot\nlabels: feature\n\n---\n\n## :sparkles: Feature Request\n\n<!-- Note: Remove sections from the template that are not relevant to the issue. -->\n\n### Description:\n\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\n"
  },
  {
    "path": ".github/PULL_REQUEST_TEMPLATE.md",
    "content": "<!-- Is this pull request ready for review? (if not, please submit in draft mode) -->\n\n## Description\n\n<!--\nPlease include a summary of the change and which issue is fixed.\n Please also include relevant motivation and context.\n List any dependencies that are required for this change.\n-->\n\n<!-- remove if not applicable -->\n\nFixes #(issue-number)\n\n### Changelog:\n\n<!--\nAdd changes in a list and add issue number in brackets, if required.\nRemove sections which are not applicable and remember to update CHANGELOG.md as well.\n-->\n\n#### Added:\n\n- ...\n\n#### Updated:\n\n- ...\n\n#### Fixed:\n\n- ...\n\n#### Removed:\n\n- ...\n"
  },
  {
    "path": ".github/workflows/build_dot.yaml",
    "content": "name: build-dot\n\non:\n  push:\n    branches:\n      - main\n    paths-ignore:\n      - \"**.md\"\n  pull_request:\n    types: [opened, synchronize, reopened, ready_for_review]\n    paths-ignore:\n      - \"**.md\"\n\njobs:\n  build-and-test:\n    if: github.event.pull_request.draft == false\n    runs-on: ubuntu-latest\n    steps:\n      - name: Code Checkout\n        uses: actions/checkout@v3\n\n      - name: Set up Python\n        uses: actions/setup-python@v4\n        with:\n          python-version: 3.8\n          cache: 'pip'\n          cache-dependency-path: 'requirements*.txt'\n\n      - name: Install dependencies\n        run: |\n          sudo apt-get update && sudo apt-get install -y ffmpeg libsndfile1-dev\n          pip install -r requirements.txt\n          pip install -e .\n\n      - name: Unit Tests\n        run: |\n          pip install -c requirements-dev.txt --force-reinstall pytest pytest-cov\n          pytest --cov=src --cov-report=term-missing:skip-covered --cov-fail-under=10\n"
  },
  {
    "path": ".github/workflows/code_check.yaml",
    "content": "name: code-check\n\non:\n    push:\n        branches:\n            - main\n    pull_request:\n        types: [opened, synchronize, reopened, ready_for_review]\n\njobs:\n    code-check:\n        if: github.event.pull_request.draft == false\n        runs-on: ubuntu-latest\n        steps:\n            - name: Code Checkout\n              uses: actions/checkout@v2\n\n            - name: Set up Python 3.8\n              uses: actions/setup-python@v2\n              with:\n                  python-version: 3.8\n                  cache: 'pip'\n                  cache-dependency-path: 'requirements*.txt'\n\n            - uses: actions/cache@v3\n              with:\n                  path: ~/.cache/pre-commit\n                  key: ${{ runner.os }}-pre-commit-${{ hashFiles('.pre-commit-config.yaml') }}\n\n            - name: Code Check\n              run: |\n                  pip install pre-commit\n                  pre-commit run --all --show-diff-on-failure\n"
  },
  {
    "path": ".gitignore",
    "content": "# repo ignores\r\ndata/results/*\r\nsaved_models/*\r\n*.patch\r\n\r\n# Created by https://www.toptal.com/developers/gitignore/api/python,macos,windows,linux\r\n# Edit at https://www.toptal.com/developers/gitignore?templates=python,macos,windows,linux\r\n\r\n### Linux ###\r\n*~\r\n\r\n# temporary files which can be created if a process still has a handle open of a deleted file\r\n.fuse_hidden*\r\n\r\n# KDE directory preferences\r\n.directory\r\n\r\n# Linux trash folder which might appear on any partition or disk\r\n.Trash-*\r\n\r\n# .nfs files are created when an open file is removed but is still being accessed\r\n.nfs*\r\n\r\n### macOS ###\r\n# General\r\n.DS_Store\r\n.AppleDouble\r\n.LSOverride\r\n\r\n# Icon must end with two \\r\r\nIcon\r\n\r\n\r\n# Thumbnails\r\n._*\r\n\r\n# Files that might appear in the root of a volume\r\n.DocumentRevisions-V100\r\n.fseventsd\r\n.Spotlight-V100\r\n.TemporaryItems\r\n.Trashes\r\n.VolumeIcon.icns\r\n.com.apple.timemachine.donotpresent\r\n\r\n# Directories potentially created on remote AFP share\r\n.AppleDB\r\n.AppleDesktop\r\nNetwork Trash Folder\r\nTemporary Items\r\n.apdisk\r\n\r\n### macOS Patch ###\r\n# iCloud generated files\r\n*.icloud\r\n\r\n### Python ###\r\n# Byte-compiled / optimized / DLL files\r\n__pycache__/\r\n*.py[cod]\r\n*$py.class\r\n\r\n# C extensions\r\n*.so\r\n\r\n# Distribution / packaging\r\n.Python\r\nbuild/\r\ndevelop-eggs/\r\ndist/\r\ndownloads/\r\neggs/\r\n.eggs/\r\nlib/\r\nlib64/\r\nparts/\r\nsdist/\r\nvar/\r\nwheels/\r\nshare/python-wheels/\r\n*.egg-info/\r\n.installed.cfg\r\n*.egg\r\nMANIFEST\r\n\r\n# PyInstaller\r\n#  Usually these files are written by a python script from a template\r\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\r\n*.manifest\r\n*.spec\r\n\r\n# Installer logs\r\npip-log.txt\r\npip-delete-this-directory.txt\r\n\r\n# Unit test / coverage reports\r\nhtmlcov/\r\n.tox/\r\n.nox/\r\n.coverage\r\n.coverage.*\r\n.cache\r\nnosetests.xml\r\ncoverage.xml\r\n*.cover\r\n*.py,cover\r\n.hypothesis/\r\n.pytest_cache/\r\ncover/\r\n\r\n# Translations\r\n*.mo\r\n*.pot\r\n\r\n# Django stuff:\r\n*.log\r\nlocal_settings.py\r\ndb.sqlite3\r\ndb.sqlite3-journal\r\n\r\n# Flask stuff:\r\ninstance/\r\n.webassets-cache\r\n\r\n# Scrapy stuff:\r\n.scrapy\r\n\r\n# Sphinx documentation\r\ndocs/_build/\r\n\r\n# PyBuilder\r\n.pybuilder/\r\ntarget/\r\n\r\n# Jupyter Notebook\r\n.ipynb_checkpoints\r\n\r\n# IPython\r\nprofile_default/\r\nipython_config.py\r\n\r\n# pyenv\r\n#   For a library or package, you might want to ignore these files since the code is\r\n#   intended to run in multiple environments; otherwise, check them in:\r\n# .python-version\r\n\r\n# pipenv\r\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\r\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\r\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\r\n#   install all needed dependencies.\r\n#Pipfile.lock\r\n\r\n# poetry\r\n#   Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.\r\n#   This is especially recommended for binary packages to ensure reproducibility, and is more\r\n#   commonly ignored for libraries.\r\n#   https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control\r\n#poetry.lock\r\n\r\n# pdm\r\n#   Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version 
control.\r\n#pdm.lock\r\n#   pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it\r\n#   in version control.\r\n#   https://pdm.fming.dev/#use-with-ide\r\n.pdm.toml\r\n\r\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm\r\n__pypackages__/\r\n\r\n# Celery stuff\r\ncelerybeat-schedule\r\ncelerybeat.pid\r\n\r\n# SageMath parsed files\r\n*.sage.py\r\n\r\n# Environments\r\n.env\r\n.venv\r\nenv/\r\nvenv/\r\nENV/\r\nenv.bak/\r\nvenv.bak/\r\n\r\n# Spyder project settings\r\n.spyderproject\r\n.spyproject\r\n\r\n# Rope project settings\r\n.ropeproject\r\n\r\n# mkdocs documentation\r\n/site\r\n\r\n# mypy\r\n.mypy_cache/\r\n.dmypy.json\r\ndmypy.json\r\n\r\n# Pyre type checker\r\n.pyre/\r\n\r\n# pytype static type analyzer\r\n.pytype/\r\n\r\n# Cython debug symbols\r\ncython_debug/\r\n\r\n# PyCharm\r\n#  JetBrains specific template is maintained in a separate JetBrains.gitignore that can\r\n#  be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore\r\n#  and can be added to the global gitignore or merged into this file.  For a more nuclear\r\n#  option (not recommended) you can uncomment the following to ignore the entire idea folder.\r\n#.idea/\r\n\r\n### Windows ###\r\n# Windows thumbnail cache files\r\nThumbs.db\r\nThumbs.db:encryptable\r\nehthumbs.db\r\nehthumbs_vista.db\r\n\r\n# Dump file\r\n*.stackdump\r\n\r\n# Folder config file\r\n[Dd]esktop.ini\r\n\r\n# Recycle Bin used on file shares\r\n$RECYCLE.BIN/\r\n\r\n# Windows Installer files\r\n*.cab\r\n*.msi\r\n*.msix\r\n*.msm\r\n*.msp\r\n\r\n# Windows shortcuts\r\n*.lnk\r\n\r\n# End of https://www.toptal.com/developers/gitignore/api/python,macos,windows,linux\r\n"
  },
  {
    "path": ".pre-commit-config.yaml",
    "content": "default_language_version:\n    python: python3.8\n\nrepos:\n      - repo: https://github.com/pre-commit/pre-commit-hooks\n        rev: v4.3.0\n        hooks:\n          - id: check-json\n          - id: check-toml\n          - id: check-yaml\n            args: [--allow-multiple-documents]\n          - id: end-of-file-fixer\n          - id: mixed-line-ending\n          - id: trailing-whitespace\n            args: [--markdown-linebreak-ext=md]\n            exclude: \"setup.cfg\"\n\n      - repo: https://github.com/psf/black\n        rev: 22.6.0\n        hooks:\n          - id: black\n\n      - repo: https://github.com/PyCQA/flake8\n        rev: 6.0.0\n        hooks:\n          - id: flake8\n            args: [--max-line-length=150, --extend-ignore=E203]\n\n      - repo: https://github.com/PyCQA/isort\n        rev: 5.12.0\n        hooks:\n          - id: isort\n            args: [\"--profile\", \"black\"]\n\n      - repo: https://github.com/pre-commit/mirrors-mypy\n        rev: v0.961\n        hooks:\n          - id: mypy\n            files: ^dot/\n            args: [--ignore-missing, --no-strict-optional]\n            additional_dependencies: [types-pyyaml, types-requests]\n"
  },
  {
    "path": ".yamllint",
    "content": "---\nyaml-files:\n    - '*.yaml'\n    - '*.yml'\n    - .yamllint\n\nrules:\n    braces: enable\n    brackets: enable\n    colons: enable\n    commas: enable\n    comments:\n        level: warning\n    comments-indentation:\n        level: warning\n    document-end: disable\n    document-start: disable\n    empty-lines: enable\n    empty-values: disable\n    hyphens: enable\n    indentation: enable\n    key-duplicates: enable\n    key-ordering: disable\n    line-length: disable\n    new-line-at-end-of-file: enable\n    new-lines: enable\n    octal-values: disable\n    quoted-strings: disable\n    trailing-spaces: enable\n    truthy:\n        level: warning\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "# Changelog\n\nAll notable changes to this project will be documented in this file.\n\nThe format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),\nand this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).\n\n## [Unreleased]\n\n* Fix fomm model download by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/160\n* Add video and image swap to the GUI by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/116\n\n## [1.3.0] - 2024-02-19\n\n## What's Changed\n* Trace error in CLI and UI by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/137\n* Update Windows executable by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/133\n* Update colab notebook by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/128\n* Add a Docker container for dot by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/95\n* Fix of cusolver error on GPU by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/110\n* Update the GUI, PyTorch and the documentation by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/107\n\n**Full Changelog**: https://github.com/sensity-ai/dot/compare/1.2.0...1.3.0\n\n## [1.2.0] - 2023-07-20\n\n## What's Changed\n* Create a dot executable for windows by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/92\n* Add a graphical interface for dot by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/85\n* Update README and CONTRIBUTING by @giorgiop in https://github.com/sensity-ai/dot/pull/40\n* Fix config paths in additional scripts under `scripts/` folder by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/43\n* Update README and add instructions for running dot with an Android emulator by @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/45\n\n**Full Changelog**: https://github.com/sensity-ai/dot/compare/1.1.0...1.2.0\n\n## [1.1.0] - 2022-07-27\n\n## What's Changed\n* Update readme by @giorgiop in https://github.com/sensity-ai/dot/pull/6\n* Add more press on README.md by @giorgiop in https://github.com/sensity-ai/dot/pull/7\n* [ImgBot] Optimize images by @imgbot in https://github.com/sensity-ai/dot/pull/8\n* Update README to Download Models from Github Release Binaries by @ajndkr in https://github.com/sensity-ai/dot/pull/19\n* Update README + Add Github Templates by @ajndkr in https://github.com/sensity-ai/dot/pull/16\n* Verify camera ID when running dot in camera mode by @ajndkr in https://github.com/sensity-ai/dot/pull/18\n* Add Feature to Use Config Files by @ajndkr in https://github.com/sensity-ai/dot/pull/17\n* ⬆️ Bump numpy from 1.21.1 to 1.22.0 by @dependabot in https://github.com/sensity-ai/dot/pull/25\n* Update python version to 3.8 by @vassilispapadop in https://github.com/sensity-ai/dot/pull/28\n* Requirements changes now trigger CI by @giorgiop in https://github.com/sensity-ai/dot/pull/27\n* Fix python3.8 pip cache location in CI by @ajndkr in https://github.com/sensity-ai/dot/pull/29\n* Fix `--save_folder` CLI Option by @vassilispapadop and @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/26\n* Add contributors list by @ajndkr in https://github.com/sensity-ai/dot/pull/31\n* Add Google Colab demo notebook by @ajndkr in https://github.com/sensity-ai/dot/pull/33\n* Speed up SimSwap's `reverse2original` by @ajndkr and @Ghassen-Chaabouni in https://github.com/sensity-ai/dot/pull/20\n* Add `bumpversion` for semantic versioning by @ajndkr in https://github.com/sensity-ai/dot/pull/34\n* 
Update README with speed metrics by @giorgiop in https://github.com/sensity-ai/dot/pull/37\n\n## New Contributors\n* @giorgiop made their first contribution in https://github.com/sensity-ai/dot/pull/6\n* @ghassen1302 made their first contribution in https://github.com/sensity-ai/dot/pull/6\n* @imgbot made their first contribution in https://github.com/sensity-ai/dot/pull/8\n* @ajndkr made their first contribution in https://github.com/sensity-ai/dot/pull/19\n* @dependabot made their first contribution in https://github.com/sensity-ai/dot/pull/25\n* @vassilispapadop made their first contribution in https://github.com/sensity-ai/dot/pull/28\n\n**Full Changelog**: https://github.com/sensity-ai/dot/compare/1.0.0...1.1.0\n\n## [1.0.0] - 2022-06-04\n\n* dot is open sourced\n\n**Full Changelog**: https://github.com/sensity-ai/dot/commits/1.0.0\n\n[Unreleased]: https://github.com/sensity-ai/dot/compare/1.3.0...HEAD\n[1.3.0]: https://github.com/sensity-ai/dot/compare/1.2.0...1.3.0\n[1.2.0]: https://github.com/sensity-ai/dot/compare/1.1.0...1.2.0\n[1.1.0]: https://github.com/sensity-ai/dot/compare/1.0.0...1.1.0\n[1.0.0]: https://github.com/sensity-ai/dot/releases/tag/1.0.0\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contributing\n\nWhen contributing to this repository, please refer to the following.\n\n## Suggested Guidelines\n\n1. When opening a pull request (PR), the title should be clear and concise in describing the changes. The PR description can include a more descriptive log of the changes.\n2. If the pull request (PR) is linked to a specific issue, the PR should be linked to the issue. You can use the [Closing Keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/linking-a-pull-request-to-an-issue) in the PR description to automatically link the issue. Merging a PR will close the linked issue.\n3. This repository follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html) for code formatting.\n4. If you are working on improving the speed of *dot*, please read first our guide on [code profiling](docs/profiling.md).\n\n## Setup Dev-Tools\n\n1. Install Dev Requirements\n\n ```bash\n pip install -r requirements-dev.txt\n ```\n\n2. Install Pre-Commit Hooks\n\n ```bash\n pre-commit install\n ```\n\n## CI/CD\n\nRun Unit Tests (with coverage):\n\n```bash\npytest --cov=src --cov-report=term-missing:skip-covered --cov-fail-under=10\n```\n\nLock Base and Dev Requirements (pre-requisite: `pip install pip-tools==6.8.0`):\n\n ```bash\n pip-compile setup.cfg\n pip-compile --extra=dev --output-file=requirements-dev.txt --strip-extras setup.cfg\n ```\n\n## Semantic Versioning\n\nThis repository follows the [Semantic Versioning](https://semver.org/) standard.\n\nBump a major release:\n\n```bash\nbumpversion major\n```\n\nBump a minor release:\n\n```bash\nbumpversion minor\n```\n\nBump a patch release:\n\n```bash\nbumpversion patch\n```\n"
  },
  {
    "path": "Dockerfile",
    "content": "FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04\n\n# copy repo codebase\nCOPY . ./dot\n\n# set working directory\nWORKDIR ./dot\n\nARG DEBIAN_FRONTEND=noninteractive\n\n# Install system dependencies\nRUN apt-get update && apt-get install -y --no-install-recommends \\\n    # Needed by opencv\n    libglib2.0-0 libsm6 libgl1 \\\n    libxext6 libxrender1 ffmpeg \\\n    build-essential cmake wget unzip zip \\\n    git libprotobuf-dev protobuf-compiler \\\n    && apt-get clean && rm -rf /var/lib/apt/lists/*\n\n# Install Miniconda\nRUN wget \\\n    https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \\\n    && mkdir /root/.conda \\\n    && bash Miniconda3-latest-Linux-x86_64.sh -b \\\n    && rm -f Miniconda3-latest-Linux-x86_64.sh\n\n# Add Miniconda to the PATH environment variable\nENV PATH=\"/root/miniconda3/bin:${PATH}\"\n\nRUN conda --version\n\n# Install requirements\nRUN conda config --add channels conda-forge\nRUN conda install python==3.8\nRUN conda install pip==21.3\nRUN pip install onnxruntime-gpu==1.9.0\nRUN pip install -r requirements.txt\n\n# Install pytorch\nRUN pip install --no-cache-dir torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118\n\n# Install dot\nRUN pip install -e .\n\n# Download and extract the checkpoints\nRUN pip install gdown\nRUN gdown 1Qaf9hE62XSvgmxR43dfiwEPWWS_dXSCE\nRUN unzip -o dot_model_checkpoints.zip\nRUN rm -rf *.z*\n\nENTRYPOINT /bin/bash\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2022, Sensity B.V.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "<div align=\"center\">\n\n<h1> the Deepfake Offensive Toolkit </h1>\n\n[![stars](https://img.shields.io/github/stars/sensity-ai/dot)](https://github.com/sensity-ai/dot/stargazers)\n[![license](https://img.shields.io/badge/License-BSD_3--Clause-blue.svg)](https://github.com/sensity-ai/dot/blob/main/LICENSE)\n[![Python 3.8](https://img.shields.io/badge/python-3.8-blue.svg)](https://www.python.org/downloads/release/python-3812/)\n[![build-dot](https://github.com/sensity-ai/dot/actions/workflows/build_dot.yaml/badge.svg)](https://github.com/sensity-ai/dot/actions/workflows/build_dot.yaml)\n[![code-check](https://github.com/sensity-ai/dot/actions/workflows/code_check.yaml/badge.svg)](https://github.com/sensity-ai/dot/actions/workflows/code_check.yaml)\n\n<a href=\"https://colab.research.google.com/github/sensity-ai/dot/blob/main/notebooks/colab_demo.ipynb\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" height=20></a>\n\n</div>\n\n*dot* (aka Deepfake Offensive Toolkit) makes real-time, controllable deepfakes ready for virtual cameras injection. *dot* is created for performing penetration testing against e.g. identity verification and video conferencing systems, for the use by security analysts, Red Team members, and biometrics researchers.\n\nIf you want to learn more about *dot* is used for penetration tests with deepfakes in the industry, read these articles by [The Verge](https://www.theverge.com/2022/5/18/23092964/deepfake-attack-facial-recognition-liveness-test-banks-sensity-report) and [Biometric Update](https://www.biometricupdate.com/202205/sensity-alleges-biometric-onboarding-providers-downplaying-deepfake-threat).\n\ndot *is developed for research and demonstration purposes. As an end user, you have the responsibility to obey all applicable laws when using this program. Authors and contributing developers assume no liability and are not responsible for any misuse or damage caused by the use of this program.*\n\n<p align=\"center\">\n<img src=\"./assets/dot_intro.gif\" width=\"500\"/>\n</p>\n\n## How it works\n\nIn a nutshell, *dot* works like this\n\n```mermaid\nflowchart LR;\n    A(your webcam feed) --> B(suite of realtime deepfakes);\n    B(suite of realtime deepfakes) --> C(virtual camera injection);\n```\n\nAll deepfakes supported by *dot* do not require additional training. 
They can be used in real time, on the fly, on a photo that becomes the target of face impersonation.\nSupported methods:\n\n- face swap (via [SimSwap](https://github.com/neuralchen/SimSwap)), at resolutions `224` and `512`\n  - with the option of face superresolution (via [GPen](https://github.com/yangxy/GPEN)) at resolutions `256` and `512`\n- lower quality face swap (via OpenCV)\n- [FOMM](https://github.com/AliaksandrSiarohin/first-order-model), First Order Motion Model for image animation\n\n## Running dot\n\n### Graphical interface\n\n#### GUI Installation\n\nDownload and run the dot executable for your OS:\n\n- Windows (Tested on Windows 10 and 11):\n  - Download `dot.zip` from [here](https://drive.google.com/file/d/1_duaEs2SAUGfAvr5oC4V3XR-ZzBtWQXo/view), unzip it and then run `dot.exe`\n- Ubuntu:\n  - ToDo\n- Mac (Tested on Apple M2 Sonoma 14.0):\n\n  - Download `dot-m2.zip` from [here](https://drive.google.com/file/d/1KTRzQrl_AVpiFIxUxW_k2F5EsosJJ_1Y/view?usp=sharing) and unzip it\n  - Open terminal and run `xattr -cr dot-executable.app` to remove any extended attributes\n  - In case of camera reading error:\n    - Right click and choose `Show Package Contents`\n    - Execute `dot-executable` from `Contents/MacOS` folder\n\n#### GUI Usage\n\nUsage example:\n\n1. Specify the source image in the field `source`.\n2. Specify the camera id number in the field `target`. In most cases, `0` is the correct camera id.\n3. Specify the config file in the field `config_file`. Select a default configuration from the dropdown list or use a custom file.\n4. (Optional) Check the field `use_gpu` to use the GPU.\n5. Click on the `RUN` button to start the deepfake.\n\nFor more information about each field, click on the menu `Help/Usage`.\n\nWatch the following demo video for a better understanding of the interface:\n\n<p align=\"center\">\n<img src=\"./assets/gui_dot_demo.gif\" width=\"500\" height=\"406\"/>\n</p>\n\n### Command Line\n\n#### CLI Installation\n\n##### Install Pre-requisites\n\n- Linux\n\n    ```bash\n    sudo apt install ffmpeg cmake\n    ```\n\n- MacOS\n\n    ```bash\n    brew install ffmpeg cmake\n    ```\n\n- Windows\n\n    1. Download and install Visual Studio Community from [here](https://visualstudio.microsoft.com/vs/community/)\n    2. Install Desktop development with C++ from the Visual Studio installer\n\n##### Create Conda Environment\n\n> The instructions assume that you have Miniconda installed on your machine. If you don't, you can refer to this [link](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html) for installation instructions.\n\n###### With GPU Support\n\n```bash\nconda env create -f envs/environment-gpu.yaml\nconda activate dot\n```\n\nInstall the `torch` and `torchvision` dependencies based on the CUDA version installed on your machine:\n\n- Install CUDA 11.8 from [link](https://developer.nvidia.com/cuda-11-8-0-download-archive)\n- Install `cudatoolkit` from `conda`: `conda install cudatoolkit=<cuda_version_no>` (replace `<cuda_version_no>` with the version on your machine)\n- Install `torch` and `torchvision` dependencies: `pip install torch==2.0.1+<cuda_tag> torchvision==0.15.2+<cuda_tag> torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118`, where `<cuda_tag>` is the CUDA tag defined by Pytorch. 
For example, `pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118` for CUDA 11.8.\n\n  Note: `torch1.9.0+cu111` can also be used.\n\nTo check that `torch` and `torchvision` are installed correctly, run the following command: `python -c \"import torch; print(torch.cuda.is_available())\"`. If the output is `True`, the dependencies are installed with CUDA support.\n\n###### With MPS Support (Apple Silicon)\n\n```bash\nconda env create -f envs/environment-apple-m2.yaml\nconda activate dot\n```\n\nTo check that `torch` and `torchvision` are installed correctly, run the following command: `python -c \"import torch; print(torch.backends.mps.is_available())\"`. If the output is `True`, the dependencies are installed with Metal programming framework support.\n\n###### With CPU Support (slow, not recommended)\n\n```bash\nconda env create -f envs/environment-cpu.yaml\nconda activate dot\n```\n\n##### Install dot\n\n```bash\npip install -e .\n```\n\n##### Download Models\n\n- Download dot model checkpoints from [here](https://drive.google.com/file/d/1Y_11R66DL4N1WY8cNlXVNR3RkHnGDGWX/view)\n- Unzip the downloaded file in the root of this project\n\n#### CLI Usage\n\nRun `dot --help` to get a full list of available options.\n\n1. Simswap\n\n    ```bash\n    dot -c ./configs/simswap.yaml --target 0 --source \"./data\" --use_gpu\n    ```\n\n2. SimSwapHQ\n\n    ```bash\n    dot -c ./configs/simswaphq.yaml --target 0 --source \"./data\" --use_gpu\n    ```\n\n3. FOMM\n\n    ```bash\n    dot -c ./configs/fomm.yaml --target 0 --source \"./data\" --use_gpu\n    ```\n\n4. FaceSwap CV2\n\n    ```bash\n    dot -c ./configs/faceswap_cv2.yaml --target 0 --source \"./data\" --use_gpu\n    ```\n\n**Note**: To enable face superresolution, use the flag `--gpen_type gpen_256` or `--gpen_type gpen_512`. To use *dot* on CPU (not recommended), do not pass the `--use_gpu` flag.\n\n#### Controlling dot with CLI\n\n> **Disclaimer**: We use the `SimSwap` technique for the following demonstration\n\nRunning *dot* via any of the above methods generates a real-time deepfake on the input video feed using source images from the `data/` folder.\n\n<p align=\"center\">\n<img src=\"./assets/dot_run.gif\" width=\"500\"/>\n</p>\n\nWhen running *dot*, a list of available control options appears in the terminal window as shown above. You can toggle through and select different source images by pressing the associated control key.\n\nWatch the following demo video for a better understanding of the control options:\n\n<p align=\"center\">\n<img src=\"./assets/dot_demo.gif\" width=\"480\"/>\n</p>\n\n## Docker\n\n### Setting up docker\n\n- Build the container\n\n    ```\n    docker-compose up --build -d\n    ```\n\n- Access the container\n\n    ```\n    docker-compose exec dot \"/bin/bash\"\n    ```\n\n### Connect docker to the webcam\n\n#### Ubuntu\n\n1. Build the container\n\n    ```\n    docker build -t dot -f Dockerfile .\n    ```\n\n2. Run the container\n\n    ```\n    xhost +\n    docker run -ti --gpus all \\\n    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \\\n    -e NVIDIA_VISIBLE_DEVICES=all \\\n    -e PYTHONUNBUFFERED=1 \\\n    -e DISPLAY \\\n    -v .:/dot \\\n    -v /tmp/.X11-unix:/tmp/.X11-unix:rw \\\n    --runtime nvidia \\\n    --entrypoint /bin/bash \\\n    -p 8080:8080 \\\n    --device=/dev/video0:/dev/video0 \\\n    dot\n    ```\n\n#### Windows\n\n
1. Follow the instructions [here](https://medium.com/@jijupax/connect-the-webcam-to-docker-on-mac-or-windows-51d894c44468) under Windows to set up the webcam with docker.\n\n2. Build the container\n\n    ```\n    docker build -t dot -f Dockerfile .\n    ```\n\n3. Run the container\n\n    ```\n    docker run -ti --gpus all \\\n    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \\\n    -e NVIDIA_VISIBLE_DEVICES=all \\\n    -e PYTHONUNBUFFERED=1 \\\n    -e DISPLAY=192.168.99.1:0 \\\n    -v .:/dot \\\n    --runtime nvidia \\\n    --entrypoint /bin/bash \\\n    -p 8080:8080 \\\n    --device=/dev/video0:/dev/video0 \\\n    -v /tmp/.X11-unix:/tmp/.X11-unix \\\n    dot\n    ```\n\n#### macOS\n\n1. Follow the instructions [here](https://github.com/gzupark/boot2docker-webcam-mac/blob/master/README.md) to set up the webcam with docker.\n\n2. Build the container\n\n    ```\n    docker build -t dot -f Dockerfile .\n    ```\n\n3. Run the container\n\n    ```\n    docker run -ti --gpus all \\\n    -e NVIDIA_DRIVER_CAPABILITIES=compute,utility \\\n    -e NVIDIA_VISIBLE_DEVICES=all \\\n    -e PYTHONUNBUFFERED=1 \\\n    -e DISPLAY=$IP:0 \\\n    -v .:/dot \\\n    -v /tmp/.X11-unix:/tmp/.X11-unix \\\n    --runtime nvidia \\\n    --entrypoint /bin/bash \\\n    -p 8080:8080 \\\n    --device=/dev/video0:/dev/video0 \\\n    dot\n    ```\n\n## Virtual Camera Injection\n\nInstructions vary depending on your operating system.\n\n### Windows\n\n- Install [OBS Studio](https://obsproject.com/).\n\n- Run OBS Studio.\n\n- In the Sources section, press the Add button (\"+\" sign),\n  select Windows Capture and press OK. In the window that appears,\n  choose \"[python.exe]: fomm\" in the Window drop-down menu and press OK.\n  Then select Edit -> Transform -> Fit to screen.\n\n- In OBS Studio, go to Tools -> VirtualCam. Check AutoStart,\n  set Buffered Frames to 0 and press Start.\n\n- Now the `OBS-Camera` camera should be available in Zoom\n  (or other videoconferencing software).\n\n### Ubuntu\n\n```bash\nsudo apt update\nsudo apt install v4l-utils v4l2loopback-dkms v4l2loopback-utils\nsudo modprobe v4l2loopback devices=1 card_label=\"OBS Cam\" exclusive_caps=1\nv4l2-ctl --list-devices\nsudo add-apt-repository ppa:obsproject/obs-studio\nsudo apt install obs-studio\n```\n\nOpen `OBS Studio` and check if `tools --> v4l2sink` exists.\nIf it doesn't, follow these instructions:\n\n```bash\nmkdir -p ~/.config/obs-studio/plugins/v4l2sink/bin/64bit/\nln -s /usr/lib/obs-plugins/v4l2sink.so ~/.config/obs-studio/plugins/v4l2sink/bin/64bit/\n```\n\nUse the virtual camera with `OBS Studio`:\n\n- Open `OBS Studio`\n- Go to `tools --> v4l2sink`\n- Select `/dev/video2` and `YUV420`\n- Click on `start`\n- Join a meeting and select `OBS Cam`\n\n### MacOS\n\n- Download and install OBS Studio for MacOS from [here](https://obsproject.com/)\n- Open OBS and follow the first-time setup (you might be required to enable certain permissions in *System Preferences*)\n- Run *dot* with the `--use_cam` flag to enable the camera feed\n- Click the \"+\" button in the sources section → select \"Windows Capture\", create a new source and enter \"OK\" → select the window with \"python\" included in the name and enter OK\n- Click the \"Start Virtual Camera\" button in the controls section\n- Select \"OBS Cam\" as the default camera in the video settings of the application target of the injection\n\n## Run dot with an Android emulator\n\nIf you are performing a test against a mobile app, virtual cameras are much harder to inject. 
An alternative is to use mobile emulators and still resort to virtual camera injection.\n\n- Run `dot`. Check [running dot](https://github.com/sensity-ai/dot#running-dot) for more information.\n\n- Run `OBS Studio` and set up the virtual camera. Check [virtual-camera-injection](https://github.com/sensity-ai/dot#virtual-camera-injection) for more information.\n\n- Download and install [Genymotion](https://www.genymotion.com/download/).\n\n- Open Genymotion and set up the Android emulator.\n\n- Set up dot with the Android emulator:\n  - Open the Android emulator.\n  - Click on `camera` and select `OBS-Camera` as front and back cameras. A preview of the dot window should appear.\n  In case there is no preview, restart `OBS` and the emulator and try again.\n  If that doesn't work, use different virtual camera software like `e2eSoft VCam` or `ManyCam`.\n  - The `dot` deepfake output should now be the emulator's phone camera.\n\n## Speed\n\n### With GPU\n\nTested on an AMD Ryzen 5 2600 Six-Core Processor with one NVIDIA GeForce RTX 2070\n\n```example\nSimswap: FPS 13.0\nSimswap + gpen 256: FPS 7.0\nSimswapHQ: FPS 11.0\nFOMM: FPS 31.0\n```\n\n### With Apple Silicon\n\nTested on a MacBook Air M2 2022 16GB\n\n```example\nSimswap: FPS 3.2\nSimswap + gpen 256: FPS 1.8\nSimswapHQ: FPS 2.7\nFOMM: FPS 2.0\n```\n\n## License\n\n*This is not a commercial Sensity product, and it is distributed freely with no warranties*\n\nThe software is distributed under [BSD 3-Clause](LICENSE).\n*dot* utilizes several open source libraries. If you use *dot*, make sure you agree with their\nlicenses too. In particular, this codebase is built on top of the following research projects:\n\n- <https://github.com/AliaksandrSiarohin/first-order-model>\n- <https://github.com/alievk/avatarify-python>\n- <https://github.com/neuralchen/SimSwap>\n- <https://github.com/yangxy/GPEN>\n\n## Contributing\n\nIf you have ideas for improving *dot*, feel free to open relevant Issues and PRs. Please read [CONTRIBUTING.md](./CONTRIBUTING.md) before contributing to the repository.\n\n## Maintainers\n\n- [@ghassen1302](https://github.com/ghassen1302)\n- [@vassilispapadop](https://github.com/vassilispapadop)\n- [@giorgiop](https://github.com/giorgiop)\n- [@AjinkyaIndulkar](https://github.com/AjinkyaIndulkar)\n- [@kjod](https://github.com/kjod)\n\n## Contributors\n\n[![](https://img.shields.io/github/contributors-anon/sensity-ai/dot)](https://github.com/sensity-ai/dot/graphs/contributors)\n\n<a href=\"https://github.com/sensity-ai/dot/graphs/contributors\">\n  <img src=\"https://contrib.rocks/image?repo=sensity-ai/dot\" />\n</a>\n\n## Run `dot` on pre-recorded image and video files\n\n- [Run *dot* on image and video files instead of camera feed](docs/run_without_camera.md)\n\n## FAQ\n\n- **`dot` is very slow and I can't run it in real time**\n\nMake sure that you are running it on a GPU card by using the `--use_gpu` flag. CPU is not recommended.\nIf you still find it too slow, it may be because you are running it on an old GPU model with less than 8GB of RAM.\n\n- **Does `dot` only work with a webcam feed or also with a pre-recorded video?**\n\nYou can use `dot` on a pre-recorded video file using [these scripts](docs/run_without_camera.md) or try it directly on [Colab](https://colab.research.google.com/github/sensity-ai/dot/blob/main/notebooks/colab_demo.ipynb).\n"
  },
  {
    "path": "configs/faceswap_cv2.yaml",
    "content": "---\nswap_type: faceswap_cv2\nmodel_path: saved_models/faceswap_cv/shape_predictor_68_face_landmarks.dat\n"
  },
  {
    "path": "configs/fomm.yaml",
    "content": "---\nswap_type: fomm\nmodel_path: saved_models/fomm/vox-adv-cpk.pth.tar\nhead_pose: true\n"
  },
  {
    "path": "configs/simswap.yaml",
    "content": "---\nswap_type: simswap\nparsing_model_path: saved_models/simswap/parsing_model/checkpoint/79999_iter.pth\narcface_model_path: saved_models/simswap/arcface_model/arcface_checkpoint.tar\ncheckpoints_dir: saved_models/simswap/checkpoints\n"
  },
  {
    "path": "configs/simswaphq.yaml",
    "content": "---\nswap_type: simswap\nparsing_model_path: saved_models/simswap/parsing_model/checkpoint/79999_iter.pth\narcface_model_path: saved_models/simswap/arcface_model/arcface_checkpoint.tar\ncheckpoints_dir: saved_models/simswap/checkpoints\ncrop_size: 512\n"
  },
  {
    "path": "docker-compose.yml",
    "content": "services:\n  dot:\n    build:\n      context: .\n      dockerfile: Dockerfile\n    # Set environment variables, if needed\n    environment:\n      - PYTHONUNBUFFERED=1\n      - NVIDIA_DRIVER_CAPABILITIES=compute,utility\n      - NVIDIA_VISIBLE_DEVICES=all\n    # Preserve files across container restarts\n    volumes:\n      - .:/dot\n    # Use NVIDIA runtime to enable GPU support in the container\n    runtime: nvidia\n    entrypoint: /bin/bash\n    ports:\n      - \"8080:8080\"\n    container_name: dot\n    stdin_open: true\n    tty: true\n"
  },
  {
    "path": "docs/create_executable.md",
    "content": "# Create executable\n\nCreate an executable of dot for different OS.\n\n## Windows\n\nFollow these steps to generate the executable for Windows.\n\n1. Run these commands\n\n```\ncd path/to/dot\nconda activate dot\n```\n\n2. Get the path of the `site-packages` by running this command\n\n```\npython -c \"import site; print(''.join(site.getsitepackages()))\"\n```\n\n3. Replace `path/to/site-packages` with the path of the `site-packages` and run this command\n\n```\npyinstaller --noconfirm --onedir --name \"dot\" --add-data \"src/dot/fomm/config;dot/fomm/config\" --add-data \"src/dot/simswap/models;dot/simswap/models\" --add-data \"path/to/site-packages;.\" --add-data \"configs;configs/\" --add-data \"data;data/\" --add-data \"saved_models;saved_models/\" src/dot/ui/ui.py\n```\n\nThe executable files can be found under the folder `dist`.\n\n## Ubuntu\n\nToDo\n\n## Mac\nFollow these steps to generate the executable for Mac.\n\n1. Run these commands\n\n```\ncd path/to/dot\nconda activate dot\n```\n\n2. Get the path of the `site-packages` by running this command\n\n```\npython -c \"import site; print(''.join(site.getsitepackages()))\"\n```\n\n3. Replace `path/to/site-packages` with the path of the `site-packages` and run this comman\n\n```\npyinstaller --noconfirm --onedir --name \"dot\" --add-data \"src/dot/fomm/config:dot/fomm/config\" --add-data \"src/dot/simswap/models:dot/simswap/models\" --add-data \"path/to/site-packages:.\" --add-data \"configs:configs/\" --add-data \"data:data/\" --add-data \"saved_models:saved_models/\" src/dot/ui/ui.py\n```\n\nThe executable files can be found under the folder `dist`.\n"
  },
  {
    "path": "docs/profiling.md",
    "content": "# Profiling\n\nProfiling should be carried out whenever significant changes are made to the pipeline. Profiling results are saved as `.txt` and `.prof` files.\n\n## Scripts\n\n### Profile SimSwap - `profile_simswap.py`\n\nThis script profiles SimSwap pipeline on a single image pair.\n\n#### Basic Usage\n\n```bash\npython profile_simswap.py\n```\n\n## Visualisation Tools\n\nApart from analysing the `.txt` profiling data, we visualise and explore the `.prof` profiling data with:\n\n* [snakeviz](#snakviz): <https://jiffyclub.github.io/snakeviz/>\n* [gprof2dot](#gprof2dot): <https://github.com/jrfonseca/gprof2dot>\n* [flameprof](#flameprof): <https://github.com/baverman/flameprof>\n\n### SnakeViz\n\n#### Conda Installation\n\n```bash\nconda install -c conda-forge snakeviz\n```\n\n#### Basic Usage\n\n```bash\nsnakeviz <path/to/profiling_data>.prof --server\n```\n\n### GProf2Dot\n\n#### Conda Installation\n\n```bash\nconda install graphviz\nconda install -c conda-forge gprof2dot\n```\n\n#### Basic Usage\n\n```bash\npython -m gprof2dot -f pstats <path/to/profiling_data>.prof | dot -Tpng -o <path/to/profiling_data>.png\n```\n\n### FlameProf\n\n#### Pip Installation\n\n```bash\npip install flameprof\n```\n\n#### Basic Usage\n\n```bash\npython -m flameprof <path/to/profiling_data>.prof > <path/to/profiling_data>.svg\n```\n"
  },
  {
    "path": "docs/run_without_camera.md",
    "content": "\n# Run dot on image and video files instead of camera feed\n\n## Using Images\n\n```bash\ndot -c ./configs/simswap.yaml --target data/ --source \"data/\" --save_folder test_local/ --use_image --use_gpu\n```\n\n```bash\ndot -c ./configs/faceswap_cv2.yaml --target data/ --source \"data/\" --save_folder test_local/ --use_image --use_gpu\n```\n\n## Using Videos\n\n```\ndot -c ./configs/simswap.yaml --target \"/path/to/driving/video\" --source \"data/image.png\"  --save_folder test_local/ --use_gpu --use_video\n```\n\n```\ndot -c ./configs/fomm.yaml --target \"/path/to/driving/video\" --source \"data/image.png\"  --save_folder test_local/  --use_gpu --use_video\n```\n\n## Faceswap images from directory (Simswap)\n\nYou can pass a `--source` folder with images and some `--target` images. Faceswapped images will be generated at `--save_folder` including a metadata json file.\n\n```bash\npython scripts/image_swap.py --config <path_to_config/config.yaml> --source <path_to_source_images_folder> --target <path_to_target_images_folder> --save_folder <output_dir> --limit 100\n```\n\n## Faceswap images from metadata (SimSwap)\n\n```bash\npython scripts/metadata_swap.py --config <path_to_config/config.yaml> --local_root_path <path_to_root_directory> --metadata <path_to_metadata_file> --set <train_or_test_dataset> --save_folder <path_to_output_folder> --limit 100\n```\n\n## Faceswap on video files (SimSwap)\n\n```bash\npython scripts/video_swap.py -c <path_to_simpswap_config/config.yaml> -s <path_to_source_images> -t <path_to_target_videos> -o <path_to_output_folder> -d 5 -l 5\n```\n\n`-d 5` is optional to trim video in seconds\n`-l 5` is optional limit total swaps\n"
  },
  {
    "path": "envs/environment-apple-m2.yaml",
    "content": "---\nname: dot\nchannels:\n    - conda-forge\n    - defaults\ndependencies:\n    - python=3.8\n    - pip=21.3\n    - pip:\n          - -r ../requirements-apple-m2.txt\n"
  },
  {
    "path": "envs/environment-cpu.yaml",
    "content": "---\nname: dot\nchannels:\n    - conda-forge\n    - defaults\ndependencies:\n    - python=3.8\n    - pip=21.3\n    - pip:\n          - -r ../requirements.txt\n"
  },
  {
    "path": "envs/environment-gpu.yaml",
    "content": "---\nname: dot\nchannels:\n    - conda-forge\n    - defaults\ndependencies:\n    - python=3.8\n    - pip=21.3\n    - pip:\n          - onnxruntime-gpu==1.18.0\n          - -r ../requirements.txt\n"
  },
  {
    "path": "notebooks/colab_demo.ipynb",
    "content": "{\n  \"cells\": [\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"colab_type\": \"text\",\n        \"id\": \"view-in-github\"\n      },\n      \"source\": [\n        \"<a href=\\\"https://colab.research.google.com/github/sensity-ai/dot/blob/update-colab-notebook/notebooks/colab_demo.ipynb\\\" target=\\\"_parent\\\"><img src=\\\"https://colab.research.google.com/assets/colab-badge.svg\\\" alt=\\\"Open In Colab\\\"/></a>\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"rOTJFaF9WIqg\"\n      },\n      \"source\": [\n        \"# Deepfake Offensive Toolkit\\n\",\n        \"\\n\",\n        \"> **Disclaimer**: This notebook is primarily used for demo purposes on Google Colab.\\n\",\n        \"\\n\",\n        \"**Note**: We recommend running this notebook on Google Colab with GPU enabled.\\n\",\n        \"\\n\",\n        \"To enable GPU, do the following:\\n\",\n        \"\\n\",\n        \"`Click \\\"Runtime\\\" tab > select \\\"Change runtime type\\\" option > set \\\"Hardware accelerator\\\" to \\\"GPU\\\"`\\n\",\n        \"\\n\",\n        \"### Install Notebook Pre-requisites:\\n\",\n        \"\\n\",\n        \"We install the following pre-requisities:\\n\",\n        \"- `ffmpeg`\\n\",\n        \"- `conda` (via [condacolab](https://github.com/conda-incubator/condacolab))\\n\",\n        \"\\n\",\n        \"Note: The notebook session will restart after installing the pre-requisites.\\n\",\n        \"\\n\",\n        \"**RUN THE BELOW CELL ONLY ONCE.**\\n\",\n        \"\\n\",\n        \"**ONCE THE NOTEBOOK SESSION RESTARTS, SKIP THIS CELL MOVE TO \\\"STEP 1\\\" SECTION OF THIS NOTEBOOK**\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"GnL7GZXGWIqo\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"# install linux pre-requisites\\n\",\n        \"!sudo apt install ffmpeg\\n\",\n        \"\\n\",\n        \"# install miniconda3\\n\",\n        \"!pip install -q condacolab\\n\",\n        \"import condacolab\\n\",\n        \"condacolab.install_miniconda()\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"9oI_egyVWIqq\"\n      },\n      \"source\": [\n        \"## Step 1 - Clone Repository\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"LvZL-BD0WIqq\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"import os\\n\",\n        \"os.chdir('/content')\\n\",\n        \"CODE_DIR = 'dot'\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"gTnnBM5xWIqr\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!git clone https://github.com/sensity-ai/dot.git $CODE_DIR\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"Hgx6JdrrWIqr\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"os.chdir(f'./{CODE_DIR}')\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"Nb3q4HbSWIqs\"\n      },\n      \"source\": [\n        \"## Step 2 - Setup Conda Environment\\n\",\n        \"\\n\",\n        \"**ONCE THE INSTALLATION IS COMPLETE, RESTART THE NOTEBOOK AND MOVE TO \\\"STEP 2\\\" SECTION OF THIS NOTEBOOK**\"\n      ]\n    
},\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"VkLiUqtbWIqt\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"# update base conda environment: install python=3.8 + cudatoolkit=11.8\\n\",\n        \"!conda install python=3.8 cudatoolkit=11.8\\n\",\n        \"\\n\",\n        \"# install pip requirements\\n\",\n        \"!pip install llvmlite==0.38.1 onnxruntime-gpu==1.9.0\\n\",\n        \"!pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cu118\\n\",\n        \"!pip install -r requirements.txt\\n\",\n        \"\\n\",\n        \"# install dot\\n\",\n        \"!pip install -e .\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"cuCaEkOiWIqy\"\n      },\n      \"source\": [\n        \"## Step 2 - Download Pretrained models\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"RVQqmGmsWIqy\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"%cd /content/dot\\n\",\n        \"\\n\",\n        \"# download binaries\\n\",\n        \"!gdown 1Qaf9hE62XSvgmxR43dfiwEPWWS_dXSCE\\n\",\n        \"\\n\",\n        \"# unzip binaries\\n\",\n        \"!unzip dot_model_checkpoints.zip\\n\",\n        \"\\n\",\n        \"# clean-up\\n\",\n        \"!rm -rf *.z*\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"IEYtimAjWIqz\"\n      },\n      \"source\": [\n        \"## Step 3: Run dot on image and video files instead of camera feed\\n\",\n        \"\\n\",\n        \"### Using SimSwap on Images\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"cA0H6ynvWIq0\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!dot \\\\\\n\",\n        \"-c ./configs/simswap.yaml \\\\\\n\",\n        \"--target \\\"data/\\\" \\\\\\n\",\n        \"--source \\\"data/\\\" \\\\\\n\",\n        \"--save_folder \\\"image_simswap_output/\\\" \\\\\\n\",\n        \"--use_image \\\\\\n\",\n        \"--use_gpu\\n\"\n      ]\n    },\n    {\n      \"cell_type\": \"markdown\",\n      \"metadata\": {\n        \"id\": \"MKbRDeSAWIq0\"\n      },\n      \"source\": [\n        \"### Using SimSwap on Videos\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"rJqqmy2vD8uf\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!dot \\\\\\n\",\n        \"-c ./configs/simswap.yaml \\\\\\n\",\n        \"--source \\\"data/\\\" \\\\\\n\",\n        \"--target \\\"data/\\\" \\\\\\n\",\n        \"--save_folder \\\"video_simswap_output/\\\" \\\\\\n\",\n        \"--limit 1 \\\\\\n\",\n        \"--use_video \\\\\\n\",\n        \"--use_gpu\"\n      ]\n    },\n    {\n      \"cell_type\": \"code\",\n      \"execution_count\": null,\n      \"metadata\": {\n        \"id\": \"oBJOJ2NWWIq1\"\n      },\n      \"outputs\": [],\n      \"source\": [\n        \"!python scripts/video_swap.py \\\\\\n\",\n        \"-s \\\"data/\\\" \\\\\\n\",\n        \"-t \\\"data/\\\" \\\\\\n\",\n        \"-o \\\"video_simswap_output/\\\" \\\\\\n\",\n        \"-d 5 \\\\\\n\",\n        \"-l 1\\n\"\n      ]\n    }\n  ],\n  \"metadata\": {\n    \"accelerator\": \"GPU\",\n    \"colab\": {\n      \"include_colab_link\": true,\n      \"provenance\": 
[]\n    },\n    \"gpuClass\": \"standard\",\n    \"kernelspec\": {\n      \"display_name\": \"Python 3 (ipykernel)\",\n      \"language\": \"python\",\n      \"name\": \"python3\"\n    },\n    \"language_info\": {\n      \"codemirror_mode\": {\n        \"name\": \"ipython\",\n        \"version\": 3\n      },\n      \"file_extension\": \".py\",\n      \"mimetype\": \"text/x-python\",\n      \"name\": \"python\",\n      \"nbconvert_exporter\": \"python\",\n      \"pygments_lexer\": \"ipython3\"\n    }\n  },\n  \"nbformat\": 4,\n  \"nbformat_minor\": 0\n}\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[build-system]\nrequires = [\n  \"setuptools>=42\",\n  \"wheel\",\n]\nbuild-backend = \"setuptools.build_meta\"\n\n[tool.pytest.ini_options]\nfilterwarnings = [\"ignore:.*\"]\n"
  },
  {
    "path": "requirements-apple-m2.txt",
    "content": "#\n# This file is autogenerated by pip-compile with python 3.8\n# To update, run:\n#\n#    pip-compile setup.cfg\n#\nabsl-py==1.1.0\n    # via mediapipe\nattrs==21.4.0\n    # via mediapipe\ncertifi==2023.7.22\n    # via requests\nchardet==4.0.0\n    # via requests\nclick==8.0.2\n    # via dot (setup.cfg)\ncycler==0.11.0\n    # via matplotlib\ndlib==19.19.0\n    # via dot (setup.cfg)\nface-alignment==1.3.3\n    # via dot (setup.cfg)\nflatbuffers==2.0\n    # via onnxruntime\nfonttools==4.43.0\n    # via matplotlib\nidna==2.10\n    # via requests\nimageio==2.19.3\n    # via scikit-image\nkiwisolver==1.4.3\n    # via matplotlib\nkornia==0.6.5\n    # via dot (setup.cfg)\nllvmlite==0.38.1\n    # via numba\nmatplotlib==3.5.2\n    # via mediapipe\nmediapipe-silicon\n    # via dot (setup.cfg)\nmediapipe==0.10.3\nnetworkx==2.8.4\n    # via scikit-image\nnumba==0.55.2\n    # via face-alignment\nnumpy==1.22.0\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   imageio\n    #   matplotlib\n    #   mediapipe\n    #   numba\n    #   onnxruntime\n    #   opencv-contrib-python\n    #   opencv-python\n    #   pywavelets\n    #   scikit-image\n    #   scipy\n    #   tifffile\n    #   torchvision\nonnxruntime==1.15.1\n    # via dot (setup.cfg)\nopencv-contrib-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\nopencv-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\npackaging==21.3\n    # via\n    #   kornia\n    #   matplotlib\n    #   scikit-image\npillow==10.0.1\n    # via\n    #   dot (setup.cfg)\n    #   imageio\n    #   matplotlib\n    #   scikit-image\n    #   torchvision\nprotobuf==3.20.2\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\n    #   onnxruntime\npyparsing==3.0.9\n    # via\n    #   matplotlib\n    #   packaging\npython-dateutil==2.8.2\n    # via matplotlib\npywavelets==1.3.0\n    # via scikit-image\npyyaml==5.4.1\n    # via dot (setup.cfg)\nrequests==2.31.0\n    # via dot (setup.cfg)\nscikit-image==0.19.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\nscipy==1.10.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   scikit-image\nsix==1.16.0\n    # via\n    #   mediapipe\n    #   python-dateutil\ntifffile==2022.5.4\n    # via scikit-image\ntorch==2.0.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   kornia\n    #   torchvision\ntorchvision==0.15.2\n    # via dot (setup.cfg)\ntqdm==4.64.0\n    # via face-alignment\ntyping-extensions==4.3.0\n    # via torch\nurllib3==1.26.18\n    # via requests\nwheel==0.38.1\n    # via mediapipe\n\n# The following packages are considered to be unsafe in a requirements file:\n# setuptools\n"
  },
  {
    "path": "requirements-dev.txt",
    "content": "#\n# This file is autogenerated by pip-compile with Python 3.8\n# by the following command:\n#\n#    pip-compile --extra=dev --output-file=requirements-dev.txt --strip-extras setup.cfg\n#\nabsl-py==1.1.0\n    # via mediapipe\naltgraph==0.17.3\n    # via pyinstaller\nasttokens==2.0.5\n    # via stack-data\natomicwrites==1.4.1\n    # via pytest\nattrs==21.4.0\n    # via\n    #   mediapipe\n    #   pytest\nbackcall==0.2.0\n    # via ipython\nblack==22.3.0\n    # via dot (setup.cfg)\nbump2version==1.0.1\n    # via bumpversion\nbumpversion==0.6.0\n    # via dot (setup.cfg)\ncertifi==2023.7.22\n    # via requests\ncffi==1.15.1\n    # via sounddevice\ncfgv==3.3.1\n    # via pre-commit\ncharset-normalizer==3.2.0\n    # via requests\nclick==8.0.2\n    # via\n    #   black\n    #   dot (setup.cfg)\ncolorama==0.4.6\n    # via\n    #   click\n    #   ipython\n    #   pytest\n    #   tqdm\ncoloredlogs==15.0.1\n    # via onnxruntime-gpu\ncoverage==6.4.2\n    # via\n    #   coverage\n    #   pytest-cov\ncustomtkinter==5.2.0\n    # via dot (setup.cfg)\ncycler==0.11.0\n    # via matplotlib\ndarkdetect==0.8.0\n    # via customtkinter\ndecorator==5.1.1\n    # via\n    #   ipdb\n    #   ipython\ndistlib==0.3.4\n    # via virtualenv\ndlib==19.19.0\n    # via dot (setup.cfg)\nexecuting==0.8.3\n    # via stack-data\nface-alignment==1.4.1\n    # via dot (setup.cfg)\nfilelock==3.7.1\n    # via\n    #   torch\n    #   virtualenv\nflake8==3.9.2\n    # via dot (setup.cfg)\nflatbuffers==2.0\n    # via\n    #   mediapipe\n    #   onnxruntime-gpu\nfonttools==4.43.0\n    # via matplotlib\nhumanfriendly==10.0\n    # via coloredlogs\nidentify==2.5.1\n    # via pre-commit\nidna==2.10\n    # via requests\nimageio==2.19.3\n    # via scikit-image\niniconfig==1.1.1\n    # via pytest\nipdb==0.13.9\n    # via dot (setup.cfg)\nipython==8.10.0\n    # via\n    #   dot (setup.cfg)\n    #   ipdb\nisort==5.12.0\n    # via dot (setup.cfg)\njedi==0.18.1\n    # via ipython\njinja2==3.1.3\n    # via torch\nkiwisolver==1.4.3\n    # via matplotlib\nkornia==0.6.5\n    # via dot (setup.cfg)\nllvmlite==0.38.1\n    # via numba\nmarkupsafe==2.1.3\n    # via jinja2\nmatplotlib==3.5.2\n    # via mediapipe\nmatplotlib-inline==0.1.3\n    # via ipython\nmccabe==0.6.1\n    # via flake8\nmediapipe==0.10.3\n    # via dot (setup.cfg)\nmpmath==1.3.0\n    # via sympy\nmypy-extensions==0.4.3\n    # via black\nnetworkx==2.8.4\n    # via\n    #   scikit-image\n    #   torch\nnodeenv==1.7.0\n    # via pre-commit\nnumba==0.55.2\n    # via face-alignment\nnumpy==1.22.0\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   imageio\n    #   matplotlib\n    #   mediapipe\n    #   numba\n    #   onnxruntime-gpu\n    #   opencv-contrib-python\n    #   opencv-python\n    #   pywavelets\n    #   scikit-image\n    #   scipy\n    #   tifffile\n    #   torchvision\nonnxruntime-gpu==1.18.0\n    # via dot (setup.cfg)\nopencv-contrib-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\nopencv-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\npackaging==21.3\n    # via\n    #   kornia\n    #   matplotlib\n    #   onnxruntime-gpu\n    #   pytest\n    #   scikit-image\nparso==0.8.3\n    # via jedi\npathspec==0.9.0\n    # via black\npefile==2023.2.7\n    # via pyinstaller\npickleshare==0.7.5\n    # via ipython\npillow==10.0.1\n    # via\n    #   dot (setup.cfg)\n    #   imageio\n    #   matplotlib\n    #   scikit-image\n    #   torchvision\nplatformdirs==2.5.2\n    # via\n    #   black\n    #   
virtualenv\npluggy==1.0.0\n    # via pytest\npre-commit==2.19.0\n    # via dot (setup.cfg)\nprompt-toolkit==3.0.30\n    # via ipython\nprotobuf==3.20.2\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\n    #   onnxruntime-gpu\npure-eval==0.2.2\n    # via stack-data\npy==1.11.0\n    # via pytest\npycodestyle==2.7.0\n    # via flake8\npycparser==2.21\n    # via cffi\npyflakes==2.3.1\n    # via flake8\npygments==2.15.0\n    # via ipython\npyinstaller==5.13.1\n    # via dot (setup.cfg)\npyinstaller-hooks-contrib==2023.5\n    # via pyinstaller\npyparsing==3.0.9\n    # via\n    #   matplotlib\n    #   packaging\npyreadline3==3.4.1\n    # via humanfriendly\npytest==7.1.2\n    # via\n    #   dot (setup.cfg)\n    #   pytest-cov\npytest-cov==3.0.0\n    # via dot (setup.cfg)\npython-dateutil==2.8.2\n    # via matplotlib\npywavelets==1.3.0\n    # via scikit-image\npywin32-ctypes==0.2.2\n    # via pyinstaller\npyyaml==5.4.1\n    # via\n    #   dot (setup.cfg)\n    #   pre-commit\nrequests==2.31.0\n    # via\n    #   dot (setup.cfg)\n    #   torchvision\nscikit-image==0.19.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\nscipy==1.10.0\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   scikit-image\nsix==1.16.0\n    # via\n    #   asttokens\n    #   python-dateutil\n    #   virtualenv\nsounddevice==0.4.6\n    # via mediapipe\nstack-data==0.3.0\n    # via ipython\nsympy==1.12\n    # via\n    #   onnxruntime-gpu\n    #   torch\ntifffile==2022.5.4\n    # via scikit-image\ntoml==0.10.2\n    # via\n    #   ipdb\n    #   pre-commit\ntomli==2.0.1\n    # via\n    #   black\n    #   coverage\n    #   pytest\ntorch==2.0.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   kornia\n    #   torchvision\ntorchvision==0.15.2\n    # via dot (setup.cfg)\ntqdm==4.64.0\n    # via face-alignment\ntraitlets==5.3.0\n    # via\n    #   ipython\n    #   matplotlib-inline\ntypes-pyyaml==6.0.10\n    # via dot (setup.cfg)\ntyping-extensions==4.3.0\n    # via\n    #   black\n    #   torch\nurllib3==1.26.18\n    # via requests\nvirtualenv==20.15.1\n    # via pre-commit\nwcwidth==0.2.5\n    # via prompt-toolkit\n\n# The following packages are considered to be unsafe in a requirements file:\n# setuptools\n"
  },
  {
    "path": "requirements.txt",
    "content": "#\n# This file is autogenerated by pip-compile with Python 3.8\n# by the following command:\n#\n#    pip-compile setup.cfg\n#\nabsl-py==1.1.0\n    # via mediapipe\nattrs==21.4.0\n    # via mediapipe\ncertifi==2023.7.22\n    # via requests\ncffi==1.15.1\n    # via sounddevice\ncharset-normalizer==3.2.0\n    # via requests\nclick==8.0.2\n    # via dot (setup.cfg)\ncolorama==0.4.6\n    # via\n    #   click\n    #   pytest\n    #   tqdm\ncoloredlogs==15.0.1\n    # via onnxruntime-gpu\ncustomtkinter==5.2.0\n    # via dot (setup.cfg)\ncycler==0.11.0\n    # via matplotlib\ndarkdetect==0.8.0\n    # via customtkinter\ndlib==19.19.0\n    # via dot (setup.cfg)\nexceptiongroup==1.1.2\n    # via pytest\nface-alignment==1.4.1\n    # via dot (setup.cfg)\nfilelock==3.12.2\n    # via torch\nflatbuffers==2.0\n    # via\n    #   mediapipe\n    #   onnxruntime-gpu\nfonttools==4.43.0\n    # via matplotlib\nhumanfriendly==10.0\n    # via coloredlogs\nidna==2.10\n    # via requests\nimageio==2.19.3\n    # via scikit-image\niniconfig==2.0.0\n    # via pytest\njinja2==3.1.3\n    # via torch\nkiwisolver==1.4.3\n    # via matplotlib\nkornia==0.6.5\n    # via dot (setup.cfg)\nllvmlite==0.38.1\n    # via numba\nmarkupsafe==2.1.3\n    # via jinja2\nmatplotlib==3.5.2\n    # via mediapipe\nmediapipe==0.10.3\n    # via dot (setup.cfg)\nmpmath==1.3.0\n    # via sympy\nnetworkx==2.8.4\n    # via\n    #   scikit-image\n    #   torch\nnumba==0.55.2\n    # via face-alignment\nnumpy==1.22.0\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   imageio\n    #   matplotlib\n    #   mediapipe\n    #   numba\n    #   onnxruntime-gpu\n    #   opencv-contrib-python\n    #   opencv-python\n    #   pywavelets\n    #   scikit-image\n    #   scipy\n    #   tifffile\n    #   torchvision\nonnxruntime-gpu==1.18.0\n    # via dot (setup.cfg)\nopencv-contrib-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\nopencv-python==4.5.5.62\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\npackaging==21.3\n    # via\n    #   kornia\n    #   matplotlib\n    #   onnxruntime-gpu\n    #   pytest\n    #   scikit-image\npillow==10.0.1\n    # via\n    #   dot (setup.cfg)\n    #   imageio\n    #   matplotlib\n    #   scikit-image\n    #   torchvision\npluggy==1.2.0\n    # via pytest\nprotobuf==3.20.2\n    # via\n    #   dot (setup.cfg)\n    #   mediapipe\n    #   onnxruntime-gpu\npycparser==2.21\n    # via cffi\npyparsing==3.0.9\n    # via\n    #   matplotlib\n    #   packaging\npyreadline3==3.4.1\n    # via humanfriendly\npytest==7.4.0\n    # via dot (setup.cfg)\npython-dateutil==2.8.2\n    # via matplotlib\npywavelets==1.3.0\n    # via scikit-image\npyyaml==5.4.1\n    # via dot (setup.cfg)\nrequests==2.31.0\n    # via\n    #   dot (setup.cfg)\n    #   torchvision\nscikit-image==0.19.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\nscipy==1.10.0\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   scikit-image\nsix==1.16.0\n    # via python-dateutil\nsounddevice==0.4.6\n    # via mediapipe\nsympy==1.12\n    # via\n    #   onnxruntime-gpu\n    #   torch\ntifffile==2022.5.4\n    # via scikit-image\ntomli==2.0.1\n    # via pytest\ntorch==2.0.1\n    # via\n    #   dot (setup.cfg)\n    #   face-alignment\n    #   kornia\n    #   torchvision\ntorchvision==0.15.2\n    # via dot (setup.cfg)\ntqdm==4.64.0\n    # via face-alignment\ntyping-extensions==4.3.0\n    # via torch\nurllib3==1.26.18\n    # via requests\n\n# The following packages are considered to be unsafe in a 
requirements file:\n# setuptools\n"
  },
  {
    "path": "scripts/image_swap.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport glob\nimport json\nimport os\n\nimport click\nimport yaml\n\nimport dot\n\n\"\"\"\nUsage:\n    python image_swap.py\n    -c <path/to/config>\n    -s <path/to/source/images>\n    -t <path/to/target/images>\n    -o <path/to/output/folder>\n    -l 5(Optional limit total swaps)\n\"\"\"\n\n\n@click.command()\n@click.option(\"-c\", \"--config\", default=\"./src/dot/simswap/configs/config.yaml\")\n@click.option(\"-s\", \"--source\", required=True)\n@click.option(\"-t\", \"--target\", required=True)\n@click.option(\"-o\", \"--save_folder\", required=False)\n@click.option(\"-l\", \"--limit\", type=int, required=False)\ndef main(\n    config: str, source: str, target: str, save_folder: str, limit: int = False\n) -> None:\n    \"\"\"Performs face-swap given a `source/target` image(s). Saves JSON file of (un)successful swaps.\n    Args:\n        config (str): Path to DOT configuration yaml file.\n        source (str): Path to source images folder or certain image file.\n        target (str): Path to target images folder or certain image file.\n        save_folder (str): Output folder to store face-swaps and metadata file.\n        limit (int, optional): Number of desired face-swaps. If not specified,\n        all possible combinations of source/target pairs will be processed. Defaults to False.\n    \"\"\"\n\n    print(f\"Loading config: {config}\")\n    with open(config) as f:\n        config = yaml.safe_load(f)\n\n    _dot = dot.DOT(use_cam=False, use_video=False, save_folder=save_folder)\n\n    analysis_config = config[\"analysis\"][\"simswap\"]\n    option = _dot.simswap(\n        use_gpu=analysis_config.get(\"use_gpu\", False),\n        use_mask=analysis_config.get(\"opt_use_mask\", False),\n        gpen_type=analysis_config.get(\"gpen\", None),\n        gpen_path=analysis_config.get(\"gpen_path\", None),\n        crop_size=analysis_config.get(\"opt_crop_size\", 224),\n    )\n\n    swappedMD, rejectedMD = _dot.generate(\n        option, source=source, target=target, limit=limit, **analysis_config\n    )\n\n    # save metadata file\n    if swappedMD:\n        with open(os.path.join(save_folder, \"metadata.json\"), \"a\") as fp:\n            json.dump(swappedMD, fp, indent=4)\n\n    # save rejected face-swaps\n    if rejectedMD:\n        with open(os.path.join(save_folder, \"rejected.json\"), \"a\") as fp:\n            json.dump(rejectedMD, fp, indent=4)\n\n\ndef find_images_from_path(path):\n    if os.path.isfile(path):\n        return [path]\n\n    try:\n        return int(path)\n    except ValueError:\n        # supported extensions\n        ext = [\"png\", \"jpg\", \"jpeg\"]\n        files = []\n        [files.extend(glob.glob(path + \"**/*.\" + e, recursive=True)) for e in ext]\n\n        return files\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "scripts/metadata_swap.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport json\nimport os\n\nimport click\nimport numpy as np\nimport pandas as pd\nimport yaml\n\nimport dot\n\n\"\"\"\nUsage:\n    python metadata_swap.py \\\n    --config <path_to_config/config.yaml> \\\n    --local_root_path <path_to_root_directory> \\\n    --metadata <path_to_metadata_file> \\\n    --set <train_or_test_dataset> \\\n    --save_folder <path_to_output_folder> \\\n    --limit 100\n\"\"\"\n\n# common face identity features\nface_identity_features = {\n    1: \"ArchedEyebrows\",\n    2: \"Attractive\",\n    3: \"BagsUnderEyes\",\n    6: \"BigLips\",\n    7: \"BigNose\",\n    12: \"BushyEyebrows\",\n    16: \"Goatee\",\n    18: \"HeavyMakeup\",\n    19: \"HighCheekbones\",\n    22: \"Mustache\",\n    23: \"NarrowEyes\",\n    24: \"NoBeard\",\n    27: \"PointyNose\",\n}\n\n\n@click.command()\n@click.option(\"-c\", \"--config\", default=\"./src/dot/simswap/configs/config.yaml\")\n@click.option(\"--local_root_path\", required=True)\n@click.option(\"--metadata\", required=True)\n@click.option(\"--set\", required=True)\n@click.option(\"-o\", \"--save_folder\", required=False)\n@click.option(\"--limit\", type=int, required=False)\ndef main(\n    config: str,\n    local_root_path: str,\n    metadata: str,\n    set: str,\n    save_folder: str,\n    limit: bool = None,\n) -> None:\n    \"\"\"Script is tailored to dictionary format as shown below. `key` is the relative path to image,\n    `value` is a list of total 44 attributes.\n    [0:40] `Face attributes`: 50'ClockShadow, ArchedEyebrows, Attractive, BagsUnderEyes, Bald,Bangs,BigLips,\n    BigNose, BlackHair, BlondHair, Blurry, BrownHair, BushyEyebrows, Chubby, DoubleChin ,Eyeglasses,Goatee,\n    GrayHair, HeavyMakeup, HighCheekbones, Male, MouthSlightlyOpen, Mustache, NarrowEyes, NoBeard, OvalFace,\n    PaleSkin, PointyNose, RecedingHairline, RosyCheeks, Sideburns, Smiling, StraightHair, WavyHair, WearingEarrings,\n    WearingHat, WearingLipstick, WearingNecklace, WearingNecktie, Young.\n    [41] `Spoof type`: Live, Photo, Poster, A4, Face Mask, Upper Body Mask, Region Mask, PC, Pa, Phone, 3D Mask.\n    [42] `Illumination`: Live, Normal, Strong, Back, Dark.\n    [43] `Live/Spoof(binary)`: Live, Spoof.\n    {\n        \"rel_path/img1.png\": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,2,2,1],\n        \"rel_path/img2.png\": [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,1,2,1]\n        ....\n    }\n    It constructs a pd.DataFrame from `metadata` and filters rows where examples are under-aged(young==0).\n    Face-swaps are performed randomly based on gender. The `result-swap` image shares common attributes with\n    the `source` image which are defined in `face_identity_features` dict.\n    Spoof-type of swapped image is defined at index 40 of attributes list and set to 11.\n    Args:\n        config (str): Path to DOT configuration yaml file.\n        local_root_path (str): Root path of dataset.\n        metadata (str): JSON metadata file path of dataset.\n        set (str): Defines train/test dataset.\n        save_folder (str): Output folder to store face-swaps and metadata file.\n        limit (int, optional): Number of desired face-swaps. 
If not specified, will be set equal to DataFrame size.\n        Defaults to False.\n    \"\"\"\n    if limit and limit < 4:\n        print(\"Error: limit should be >= 4\")\n        return\n\n    output_data_folder = os.path.join(save_folder + f\"Data/{set}/swap/\")\n    df = pd.read_json(metadata, orient=\"index\")\n\n    mapping = {\n        df.columns[20]: \"gender\",\n        df.columns[26]: \"pale_skin\",\n        df.columns[39]: \"young\",\n    }\n    df = df.rename(columns=mapping)\n    df.head()\n    # keep only live images\n    df = df.loc[df.index.str.contains(\"live\")]\n    # keep only adult images\n    df = df.loc[df[\"young\"] == 0]\n    if not limit:\n        limit = df.shape[0]\n        print(f\"Limit is set to: {limit}\")\n\n    filters = [\"gender==1\", \"gender==0\"]\n    swaps = []\n    for filter in filters:\n        # get n random rows based on condition ==1(male)\n        filtered = df.query(filter).sample(n=round(limit / len(filters)), replace=True)\n        # shuffle again, keep only indices and convert to list\n        filtered = filtered.sample(frac=1).index.tolist()\n        # append local_root_path\n        filtered = [os.path.join(local_root_path, p) for p in filtered]\n        # split into two lists roughly equal size\n        mid_index = round(len(filtered) / 2)\n        src = filtered[0:mid_index]\n        tar = filtered[mid_index:]\n        swaps.append((src, tar))\n\n    print(f\"Loading config: {config}\")\n    with open(config) as f:\n        config = yaml.safe_load(f)\n\n    analysis_config = config[\"analysis\"][\"simswap\"]\n    _dot = dot.DOT(use_video=False, save_folder=output_data_folder)\n    _dot.use_cam = False\n    option = _dot.build_option(\n        swap_type=\"simswap\",\n        use_gpu=analysis_config.get(\"use_gpu\", False),\n        use_mask=analysis_config.get(\"opt_use_mask\", False),\n        gpen_type=analysis_config.get(\"gpen\", None),\n        gpen_path=analysis_config.get(\"gpen_path\", None),\n        crop_size=analysis_config.get(\"opt_crop_size\", 224),\n    )\n    total_succeed = {}\n    total_failed = {}\n    for swap in swaps:\n        source_list = swap[0]\n        target_list = swap[1]\n        # perform faceswap\n        for source, target in zip(source_list, target_list):\n            success, rejections = _dot.generate(\n                option,\n                source=source,\n                target=target,\n                duration=None,\n                **analysis_config,\n            )\n\n        total_succeed = {**total_succeed, **success}\n        total_failed = {**total_failed, **rejections}\n\n    # save succeed face-swaps file\n    if total_succeed:\n        # append attribute list for source/target images\n        for key, value in total_succeed.items():\n            src_attr = (\n                df.loc[df.index == value[\"source\"][\"path\"].replace(local_root_path, \"\")]\n                .iloc[0, 0:]\n                .tolist()\n            )\n            tar_attr = (\n                df.loc[df.index == value[\"target\"][\"path\"].replace(local_root_path, \"\")]\n                .iloc[0, 0:]\n                .tolist()\n            )\n\n            total_succeed[key][\"source\"][\"attr\"] = src_attr\n            total_succeed[key][\"target\"][\"attr\"] = tar_attr\n\n        with open(os.path.join(save_folder, \"swaps_succeed.json\"), \"w\") as fp:\n            json.dump(total_succeed, fp)\n\n    # save failed face-swaps file\n    if total_failed:\n        with open(os.path.join(save_folder, 
\"swaps_failed.json\"), \"w\") as fp:\n            json.dump(total_failed, fp)\n\n    # format metadata to appropriate format\n    formatted = format_swaps(total_succeed)\n\n    # save file\n    if formatted:\n        with open(os.path.join(save_folder, f\"{set}_label_swap.json\"), \"w\") as fp:\n            json.dump(formatted, fp)\n\n\ndef format_swaps(succeeds):\n    formatted = {}\n    for key, value in succeeds.items():\n        # attributes of source image\n        src_attr = np.asarray(value[\"source\"][\"attr\"])\n        # attributes of target image\n        tar_attr = np.asarray(value[\"target\"][\"attr\"])\n        # attributes of swapped image. copy from target image\n        swap_attr = tar_attr\n        # transfer facial attributes from source image\n        for idx in face_identity_features.keys():\n            swap_attr[idx] = src_attr[idx]\n\n        # swap-spoof-type-11, FaceSwap\n        swap_attr[40] = 11\n        # store in dict\n        formatted[key] = swap_attr.tolist()\n\n    return formatted\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "scripts/profile_simswap.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport cProfile\nimport glob\nimport os\nimport pstats\n\nimport click\nimport yaml\n\nimport dot\n\n# define globals\nCONFIG = \"./src/dot/simswap/configs/config.yaml\"\nSOURCE = \"data/obama.jpg\"\nTARGET = \"data/mona.jpg\"\nSAVE_FOLDER = \"./profile_output/\"\nLIMIT = 1\n\n\n@click.command()\n@click.option(\"-c\", \"--config\", default=CONFIG)\n@click.option(\"--source\", default=SOURCE)\n@click.option(\"--target\", default=TARGET)\n@click.option(\"--save_folder\", default=SAVE_FOLDER)\n@click.option(\"--limit\", type=int, default=LIMIT)\ndef main(\n    config=CONFIG, source=SOURCE, target=TARGET, save_folder=SAVE_FOLDER, limit=LIMIT\n):\n\n    profiler = cProfile.Profile()\n\n    with open(config) as f:\n        config = yaml.safe_load(f)\n\n    analysis_config = config[\"analysis\"][\"simswap\"]\n    _dot = dot.DOT(use_cam=False, use_video=False, save_folder=save_folder)\n    option = _dot.simswap(\n        use_gpu=config[\"analysis\"][\"simswap\"][\"use_gpu\"],\n        gpen_type=config[\"analysis\"][\"simswap\"][\"gpen\"],\n        gpen_path=config[\"analysis\"][\"simswap\"][\"gpen_path\"],\n        use_mask=config[\"analysis\"][\"simswap\"][\"opt_use_mask\"],\n        crop_size=config[\"analysis\"][\"simswap\"][\"opt_crop_size\"],\n    )\n    option.create_model(**analysis_config)\n    profiler.enable()\n\n    swappedMD, rejectedMD = _dot.generate(\n        option,\n        source=source,\n        target=target,\n        limit=limit,\n        profiler=True,\n        **analysis_config\n    )\n    profiler.disable()\n    stats = pstats.Stats(profiler)\n    stats.dump_stats(\"SimSwap_profiler.prof\")\n\n\ndef find_images_from_path(path):\n    if os.path.isfile(path):\n        return [path]\n\n    try:\n        return int(path)\n    except ValueError:\n        # supported extensions\n        ext = [\"png\", \"jpg\", \"jpeg\"]\n        files = []\n        [files.extend(glob.glob(path + \"**/*.\" + e, recursive=True)) for e in ext]\n\n        return files\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "scripts/video_swap.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport click\nimport yaml\n\nimport dot\n\n\"\"\"\nUsage:\n    python video_swap.py\n    -c <path/to/config>\n    -s <path/to/source/images>\n    -t <path/to/target/videos>\n    -o <path/to/output/folder>\n    -d 5(Optional trim video)\n    -l 5(Optional limit total swaps)\n\"\"\"\n\n\n@click.command()\n@click.option(\"-c\", \"--config\", default=\"./src/dot/simswap/configs/config.yaml\")\n@click.option(\"-s\", \"--source_image_path\", required=True)\n@click.option(\"-t\", \"--target_video_path\", required=True)\n@click.option(\"-o\", \"--output\", required=True)\n@click.option(\"-d\", \"--duration_per_video\", required=False)\n@click.option(\"-l\", \"--limit\", type=int, required=False)\ndef main(\n    config: str,\n    source_image_path: str,\n    target_video_path: str,\n    output: str,\n    duration_per_video: int,\n    limit: int = None,\n):\n    \"\"\"Given `source` and `target` folders, performs face-swap on each video with randomly chosen\n    image found `source` path.\n    Supported image formats: `[\"jpg\", \"png\", \"jpeg\"]`\n    Supported video formats: `[\"avi\", \"mp4\", \"mov\", \"MOV\"]`\n\n    Args:\n        config (str): Path to configuration file.\n        source_image_path (str): Path to source images\n        target_video_path (str): Path to target videos\n        output (str): Output folder path.\n        duration_per_video (int): Trim duration of target video in seconds.\n        limit (int, optional): Limit number of video-swaps. Defaults to None.\n    \"\"\"\n    print(f\"Loading config: {config}\")\n    with open(config) as f:\n        config = yaml.safe_load(f)\n\n    _dot = dot.DOT(use_cam=False, use_video=True, save_folder=output)\n\n    analysis_config = config[\"analysis\"][\"simswap\"]\n    option = _dot.simswap(\n        use_gpu=analysis_config.get(\"use_gpu\", False),\n        use_mask=analysis_config.get(\"opt_use_mask\", False),\n        gpen_type=analysis_config.get(\"gpen\", None),\n        gpen_path=analysis_config.get(\"gpen_path\", None),\n        crop_size=analysis_config.get(\"opt_crop_size\", 224),\n    )\n    _dot.generate(\n        option=option,\n        source=source_image_path,\n        target=target_video_path,\n        duration=duration_per_video,\n        limit=limit,\n        **analysis_config,\n    )\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "setup.cfg",
    "content": "[bumpversion]\ncurrent_version = 1.4.0\ncommit = True\ntag = False\nparse = (?P<major>\\d+)\\.(?P<minor>\\d+)\\.(?P<patch>\\d+)?\nserialize = \n\t{major}.{minor}.{patch}\n\n[bumpversion:file:src/dot/__init__.py]\nsearch = __version__ = \"{current_version}\"\nreplace = __version__ = \"{new_version}\"\n\n[metadata]\nname = dot\nversion = attr: dot.__version__\nauthor = attr: dot.__author__\ndescription = attr: dot.__doc__\nlong_description = file: README.md\nlog_description_content_type = text/markdown\nurl = attr: dot.__url__\nlicense = BSD 3-Clause License\nclassifiers = \n\tProgramming Language :: Python :: 3.8\n\n[options]\npackage_dir = \n\t= src\npackages = find:\npython_requires = >=3.8,<3.9\ninstall_requires = \n\tclick\n\tdlib\n\tface_alignment==1.4.1\n\tkornia\n\tmediapipe\n\tnumpy\n\tonnxruntime-gpu==1.18.0\n\topencv-contrib-python\n\topencv_python\n\tPillow\n\tprotobuf\n\tPyYAML\n\trequests\n\tscikit_image\n\tscipy\n\ttorch==2.0.1\n\ttorchvision==0.15.2\n\tcustomtkinter\n\tpytest\n\n[options.extras_require]\ndev = \n\tblack\n\tbumpversion\n\tflake8\n\tipdb\n\tipython\n\tisort==5.12.0\n\tpre-commit\n\tpyinstaller\n\tpytest\n\tpytest-cov\n\ttypes-PyYAML\n\n[options.packages.find]\nwhere = src\n\n[options.entry_points]\nconsole_scripts = \n\tdot = dot.__main__:main\n\tdot-ui = dot.ui.ui:main\n"
  },
  {
    "path": "src/dot/__init__.py",
    "content": "#!/usr/bin/env python3\r\n\"\"\"\r\nCopyright (c) 2022, Sensity B.V. All rights reserved.\r\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\r\n\"\"\"\r\n\r\nfrom .dot import DOT\r\n\r\n__version__ = \"1.4.0\"\r\n__author__ = \"Sensity\"\r\n__url__ = \"https://github.com/sensity-ai/dot/tree/main/dot\"\r\n__docs__ = \"Deepfake offensive toolkit\"\r\n__all__ = [\"DOT\"]\r\n"
  },
  {
    "path": "src/dot/__main__.py",
    "content": "#!/usr/bin/env python3\r\n\"\"\"\r\nCopyright (c) 2022, Sensity B.V. All rights reserved.\r\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\r\n\"\"\"\r\n\r\nimport traceback\r\nfrom typing import Union\r\n\r\nimport click\r\nimport yaml\r\n\r\nfrom .dot import DOT\r\n\r\n\r\ndef run(\r\n    swap_type: str,\r\n    source: str,\r\n    target: Union[int, str],\r\n    model_path: str = None,\r\n    parsing_model_path: str = None,\r\n    arcface_model_path: str = None,\r\n    checkpoints_dir: str = None,\r\n    gpen_type: str = None,\r\n    gpen_path: str = \"saved_models/gpen\",\r\n    crop_size: int = 224,\r\n    head_pose: bool = False,\r\n    save_folder: str = None,\r\n    show_fps: bool = False,\r\n    use_gpu: bool = False,\r\n    use_video: bool = False,\r\n    use_image: bool = False,\r\n    limit: int = None,\r\n):\r\n    \"\"\"Builds a DOT object and runs it.\r\n\r\n    Args:\r\n        swap_type (str): The type of swap to run.\r\n        source (str): The source image or video.\r\n        target (Union[int, str]): The target image or video.\r\n        model_path (str, optional): The path to the model's weights. Defaults to None.\r\n        parsing_model_path (str, optional): The path to the parsing model. Defaults to None.\r\n        arcface_model_path (str, optional): The path to the arcface model. Defaults to None.\r\n        checkpoints_dir (str, optional): The path to the checkpoints directory. Defaults to None.\r\n        gpen_type (str, optional): The type of gpen model to use. Defaults to None.\r\n        gpen_path (str, optional): The path to the gpen models. Defaults to \"saved_models/gpen\".\r\n        crop_size (int, optional): The size to crop the images to. Defaults to 224.\r\n        save_folder (str, optional): The path to the save folder. Defaults to None.\r\n        show_fps (bool, optional): Pass flag to show fps value. Defaults to False.\r\n        use_gpu (bool, optional): Pass flag to use GPU else use CPU. Defaults to False.\r\n        use_video (bool, optional): Pass flag to use video-swap pipeline. Defaults to False.\r\n        use_image (bool, optional): Pass flag to use image-swap pipeline. Defaults to False.\r\n        limit (int, optional): The number of frames to process. 
Defaults to None.\r\n    \"\"\"\r\n    try:\r\n        # initialize dot\r\n        _dot = DOT(use_video=use_video, use_image=use_image, save_folder=save_folder)\r\n\r\n        # build dot\r\n        option = _dot.build_option(\r\n            swap_type=swap_type,\r\n            use_gpu=use_gpu,\r\n            gpen_type=gpen_type,\r\n            gpen_path=gpen_path,\r\n            crop_size=crop_size,\r\n        )\r\n\r\n        # run dot\r\n        _dot.generate(\r\n            option=option,\r\n            source=source,\r\n            target=target,\r\n            show_fps=show_fps,\r\n            model_path=model_path,\r\n            limit=limit,\r\n            parsing_model_path=parsing_model_path,\r\n            arcface_model_path=arcface_model_path,\r\n            checkpoints_dir=checkpoints_dir,\r\n            opt_crop_size=crop_size,\r\n            head_pose=head_pose,\r\n        )\r\n    except:  # noqa\r\n        print(traceback.format_exc())\r\n\r\n\r\n@click.command()\r\n@click.option(\r\n    \"--swap_type\",\r\n    \"swap_type\",\r\n    type=click.Choice([\"fomm\", \"faceswap_cv2\", \"simswap\"], case_sensitive=False),\r\n)\r\n@click.option(\r\n    \"--source\",\r\n    \"source\",\r\n    required=True,\r\n    help=\"Images to swap with target\",\r\n)\r\n@click.option(\r\n    \"--target\",\r\n    \"target\",\r\n    required=True,\r\n    help=\"Cam ID or target media\",\r\n)\r\n@click.option(\r\n    \"--model_path\",\r\n    \"model_path\",\r\n    default=None,\r\n    help=\"Path to 68-point facial landmark detector for FaceSwap-cv2 or to the model's weights for the FOM\",\r\n)\r\n@click.option(\r\n    \"--parsing_model_path\",\r\n    \"parsing_model_path\",\r\n    default=None,\r\n    help=\"Path to the parsing model\",\r\n)\r\n@click.option(\r\n    \"--arcface_model_path\",\r\n    \"arcface_model_path\",\r\n    default=None,\r\n    help=\"Path to arcface model\",\r\n)\r\n@click.option(\r\n    \"--checkpoints_dir\",\r\n    \"checkpoints_dir\",\r\n    default=None,\r\n    help=\"models are saved here\",\r\n)\r\n@click.option(\r\n    \"--gpen_type\",\r\n    \"gpen_type\",\r\n    default=None,\r\n    type=click.Choice([\"gpen_256\", \"gpen_512\"]),\r\n)\r\n@click.option(\r\n    \"--gpen_path\",\r\n    \"gpen_path\",\r\n    default=\"saved_models/gpen\",\r\n    help=\"Path to gpen models.\",\r\n)\r\n@click.option(\"--crop_size\", \"crop_size\", type=int, default=224)\r\n@click.option(\"--save_folder\", \"save_folder\", type=str, default=None)\r\n@click.option(\r\n    \"--show_fps\",\r\n    \"show_fps\",\r\n    type=bool,\r\n    default=False,\r\n    is_flag=True,\r\n    help=\"Pass flag to show fps value.\",\r\n)\r\n@click.option(\r\n    \"--use_gpu\",\r\n    \"use_gpu\",\r\n    type=bool,\r\n    default=False,\r\n    is_flag=True,\r\n    help=\"Pass flag to use GPU else use CPU.\",\r\n)\r\n@click.option(\r\n    \"--use_video\",\r\n    \"use_video\",\r\n    type=bool,\r\n    default=False,\r\n    is_flag=True,\r\n    help=\"Pass flag to use video-swap pipeline.\",\r\n)\r\n@click.option(\r\n    \"--use_image\",\r\n    \"use_image\",\r\n    type=bool,\r\n    default=False,\r\n    is_flag=True,\r\n    help=\"Pass flag to use image-swap pipeline.\",\r\n)\r\n@click.option(\"--limit\", \"limit\", type=int, default=None)\r\n@click.option(\r\n    \"-c\",\r\n    \"--config\",\r\n    \"config_file\",\r\n    help=\"Configuration file. 
Overrides duplicate options passed.\",\r\n    required=False,\r\n    default=None,\r\n)\r\ndef main(\r\n    swap_type: str,\r\n    source: str,\r\n    target: Union[int, str],\r\n    model_path: str = None,\r\n    parsing_model_path: str = None,\r\n    arcface_model_path: str = None,\r\n    checkpoints_dir: str = None,\r\n    gpen_type: str = None,\r\n    gpen_path: str = \"saved_models/gpen\",\r\n    crop_size: int = 224,\r\n    save_folder: str = None,\r\n    show_fps: bool = False,\r\n    use_gpu: bool = False,\r\n    use_video: bool = False,\r\n    use_image: bool = False,\r\n    limit: int = None,\r\n    config_file: str = None,\r\n):\r\n    \"\"\"CLI entrypoint for dot.\"\"\"\r\n    # load config, if provided\r\n    config = {}\r\n    if config_file is not None:\r\n        with open(config_file) as f:\r\n            config = yaml.safe_load(f)\r\n\r\n    # run dot\r\n    run(\r\n        swap_type=config.get(\"swap_type\", swap_type),\r\n        source=source,\r\n        target=target,\r\n        model_path=config.get(\"model_path\", model_path),\r\n        parsing_model_path=config.get(\"parsing_model_path\", parsing_model_path),\r\n        arcface_model_path=config.get(\"arcface_model_path\", arcface_model_path),\r\n        checkpoints_dir=config.get(\"checkpoints_dir\", checkpoints_dir),\r\n        gpen_type=config.get(\"gpen_type\", gpen_type),\r\n        gpen_path=config.get(\"gpen_path\", gpen_path),\r\n        crop_size=config.get(\"crop_size\", crop_size),\r\n        head_pose=config.get(\"head_pose\", False),\r\n        save_folder=save_folder,\r\n        show_fps=show_fps,\r\n        use_gpu=use_gpu,\r\n        use_video=use_video,\r\n        use_image=use_image,\r\n        limit=limit,\r\n    )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n"
  },
  {
    "path": "src/dot/commons/__init__.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nfrom .model_option import ModelOption\n\n__all__ = [\"ModelOption\"]\n"
  },
  {
    "path": "src/dot/commons/cam/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/commons/cam/cam.py",
    "content": "#!/usr/bin/env python3\n\nimport glob\nimport os\n\nimport cv2\nimport numpy as np\nimport requests\nimport yaml\n\nfrom ..utils import info, resize\nfrom .camera_selector import query_cameras\n\n\ndef is_new_frame_better(log, source, driving, predictor):\n    global avatar_kp\n    global display_string\n\n    if avatar_kp is None:\n        display_string = \"No face detected in avatar.\"\n        return False\n\n    if predictor.get_start_frame() is None:\n        display_string = \"No frame to compare to.\"\n        return True\n\n    _ = resize(driving, (128, 128))[..., :3]\n    new_kp = predictor.get_frame_kp(driving)\n\n    if new_kp is not None:\n        new_norm = (np.abs(avatar_kp - new_kp) ** 2).sum()\n        old_norm = (np.abs(avatar_kp - predictor.get_start_frame_kp()) ** 2).sum()\n\n        out_string = \"{0} : {1}\".format(int(new_norm * 100), int(old_norm * 100))\n        display_string = out_string\n        log(out_string)\n\n        return new_norm < old_norm\n    else:\n        display_string = \"No face found!\"\n        return False\n\n\ndef load_stylegan_avatar(IMG_SIZE=256):\n\n    url = \"https://thispersondoesnotexist.com/image\"\n    r = requests.get(url, headers={\"User-Agent\": \"My User Agent 1.0\"}).content\n\n    image = np.frombuffer(r, np.uint8)\n    image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n    image = resize(image, (IMG_SIZE, IMG_SIZE))\n\n    return image\n\n\ndef load_images(log, opt_avatars, IMG_SIZE=256):\n    avatars = []\n    filenames = []\n    images_list = sorted(glob.glob(f\"{opt_avatars}/*\"))\n    for i, f in enumerate(images_list):\n        if f.endswith(\".jpg\") or f.endswith(\".jpeg\") or f.endswith(\".png\"):\n            img = cv2.imread(f)\n            if img is None:\n                log(\"Failed to open image: {}\".format(f))\n                continue\n\n            if img.ndim == 2:\n                img = np.tile(img[..., None], [1, 1, 3])\n            img = img[..., :3][..., ::-1]\n            img = resize(img, (IMG_SIZE, IMG_SIZE))\n            avatars.append(img)\n            filenames.append(f)\n    return avatars, filenames\n\n\ndef draw_rect(img, rw=0.6, rh=0.8, color=(255, 0, 0), thickness=2):\n    h, w = img.shape[:2]\n    _l = w * (1 - rw) // 2\n    r = w - _l\n    u = h * (1 - rh) // 2\n    d = h - u\n    img = cv2.rectangle(img, (int(_l), int(u)), (int(r), int(d)), color, thickness)\n\n\ndef kp_to_pixels(arr):\n    \"\"\"Convert normalized landmark locations to screen pixels\"\"\"\n    return ((arr + 1) * 127).astype(np.int32)\n\n\ndef draw_face_landmarks(LANDMARK_SLICE_ARRAY, img, face_kp, color=(20, 80, 255)):\n\n    if face_kp is not None:\n        img = cv2.polylines(\n            img, np.split(kp_to_pixels(face_kp), LANDMARK_SLICE_ARRAY), False, color\n        )\n\n\ndef print_help(avatar_names):\n    info(\"\\n\\n=== Control keys ===\")\n    info(\"1-9: Change avatar\")\n    for i, fname in enumerate(avatar_names):\n        key = i + 1\n        name = fname.split(\"/\")[-1]\n        info(f\"{key}: {name}\")\n    info(\"W: Zoom camera in\")\n    info(\"S: Zoom camera out\")\n    info(\"A: Previous avatar in folder\")\n    info(\"D: Next avatar in folder\")\n    info(\"Q: Get random avatar\")\n    info(\"X: Calibrate face pose\")\n    info(\"I: Show FPS\")\n    info(\"ESC: Quit\")\n    info(\"\\nFull key list: https://github.com/alievk/avatarify#controls\")\n    info(\"\\n\\n\")\n\n\ndef draw_fps(\n    frame,\n    fps,\n    timing,\n    x0=10,\n  
  y0=20,\n    ystep=30,\n    fontsz=0.5,\n    color=(255, 255, 255),\n    IMG_SIZE=256,\n):\n\n    frame = frame.copy()\n    black = (0, 0, 0)\n    black_thick = 2\n\n    cv2.putText(\n        frame,\n        f\"FPS: {fps:.1f}\",\n        (x0, y0 + ystep * 0),\n        0,\n        fontsz * IMG_SIZE / 256,\n        (0, 0, 0),\n        black_thick,\n    )\n    cv2.putText(\n        frame,\n        f\"FPS: {fps:.1f}\",\n        (x0, y0 + ystep * 0),\n        0,\n        fontsz * IMG_SIZE / 256,\n        color,\n        1,\n    )\n    cv2.putText(\n        frame,\n        f\"Model time (ms): {timing['predict']:.1f}\",\n        (x0, y0 + ystep * 1),\n        0,\n        fontsz * IMG_SIZE / 256,\n        black,\n        black_thick,\n    )\n    cv2.putText(\n        frame,\n        f\"Model time (ms): {timing['predict']:.1f}\",\n        (x0, y0 + ystep * 1),\n        0,\n        fontsz * IMG_SIZE / 256,\n        color,\n        1,\n    )\n    cv2.putText(\n        frame,\n        f\"Preproc time (ms): {timing['preproc']:.1f}\",\n        (x0, y0 + ystep * 2),\n        0,\n        fontsz * IMG_SIZE / 256,\n        black,\n        black_thick,\n    )\n    cv2.putText(\n        frame,\n        f\"Preproc time (ms): {timing['preproc']:.1f}\",\n        (x0, y0 + ystep * 2),\n        0,\n        fontsz * IMG_SIZE / 256,\n        color,\n        1,\n    )\n    cv2.putText(\n        frame,\n        f\"Postproc time (ms): {timing['postproc']:.1f}\",\n        (x0, y0 + ystep * 3),\n        0,\n        fontsz * IMG_SIZE / 256,\n        black,\n        black_thick,\n    )\n    cv2.putText(\n        frame,\n        f\"Postproc time (ms): {timing['postproc']:.1f}\",\n        (x0, y0 + ystep * 3),\n        0,\n        fontsz * IMG_SIZE / 256,\n        color,\n        1,\n    )\n    return frame\n\n\ndef draw_landmark_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255), IMG_SIZE=256):\n\n    frame = frame.copy()\n    cv2.putText(frame, \"ALIGN FACES\", (60, 20), 0, fontsz * IMG_SIZE / 255, color, thk)\n    cv2.putText(\n        frame, \"THEN PRESS X\", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk\n    )\n    return frame\n\n\ndef draw_calib_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255), IMG_SIZE=256):\n    frame = frame.copy()\n    cv2.putText(\n        frame, \"FIT FACE IN RECTANGLE\", (40, 20), 0, fontsz * IMG_SIZE / 255, color, thk\n    )\n    cv2.putText(frame, \"W - ZOOM IN\", (60, 40), 0, fontsz * IMG_SIZE / 255, color, thk)\n    cv2.putText(frame, \"S - ZOOM OUT\", (60, 60), 0, fontsz * IMG_SIZE / 255, color, thk)\n    cv2.putText(\n        frame, \"THEN PRESS X\", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk\n    )\n    return frame\n\n\ndef select_camera(log, config):\n    cam_config = config[\"cam_config\"]\n    cam_id = None\n\n    if os.path.isfile(cam_config):\n        with open(cam_config, \"r\") as f:\n            cam_config = yaml.load(f, Loader=yaml.FullLoader)\n            cam_id = cam_config[\"cam_id\"]\n    else:\n        cam_frames = query_cameras(config[\"query_n_cams\"])\n\n        if cam_frames:\n            if len(cam_frames) == 1:\n                cam_id = list(cam_frames)[0]\n            else:\n                cam_id = select_camera(cam_frames, window=\"CLICK ON YOUR CAMERA\")\n            log(f\"Selected camera {cam_id}\")\n\n            with open(cam_config, \"w\") as f:\n                yaml.dump({\"cam_id\": cam_id}, f)\n        else:\n            log(\"No cameras are available\")\n\n    return cam_id\n"
  },
  {
    "path": "src/dot/commons/cam/camera_selector.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport yaml\n\nfrom ..utils import log\n\ng_selected_cam = None\n\n\ndef query_cameras(n_cams):\n    cam_frames = {}\n    cap = None\n    for camid in range(n_cams):\n        log(f\"Trying camera with id {camid}\")\n        cap = cv2.VideoCapture(camid)\n\n        if not cap.isOpened():\n            log(f\"Camera with id {camid} is not available\")\n            continue\n\n        ret, frame = cap.read()\n\n        if not ret or frame is None:\n            log(f\"Could not read from camera with id {camid}\")\n            cap.release()\n            continue\n\n        for i in range(10):\n            ret, frame = cap.read()\n\n        cam_frames[camid] = frame.copy()\n\n        cap.release()\n\n    return cam_frames\n\n\ndef make_grid(images, cell_size=(320, 240), cols=2):\n    w0, h0 = cell_size\n    _rows = len(images) // cols + int(len(images) % cols)\n    _cols = min(len(images), cols)\n    grid = np.zeros((h0 * _rows, w0 * _cols, 3), dtype=np.uint8)\n    for i, (camid, img) in enumerate(images.items()):\n        img = cv2.resize(img, (w0, h0))\n        # add rect\n        img = cv2.rectangle(img, (1, 1), (w0 - 1, h0 - 1), (0, 0, 255), 2)\n        # add id\n        img = cv2.putText(img, f\"Camera {camid}\", (10, 30), 0, 1, (0, 255, 0), 2)\n        c = i % cols\n        r = i // cols\n        grid[r * h0 : (r + 1) * h0, c * w0 : (c + 1) * w0] = img[..., :3]\n    return grid\n\n\ndef mouse_callback(event, x, y, flags, userdata):\n    global g_selected_cam\n    if event == 1:\n        cell_size, grid_cols, cam_frames = userdata\n        c = x // cell_size[0]\n        r = y // cell_size[1]\n        camid = r * grid_cols + c\n        if camid < len(cam_frames):\n            g_selected_cam = camid\n\n\ndef select_camera(cam_frames, window=\"Camera selector\"):\n    cell_size = 320, 240\n    grid_cols = 2\n    grid = make_grid(cam_frames, cols=grid_cols)\n\n    # to fit the text if only one cam available\n    if grid.shape[1] == 320:\n        cell_size = 640, 480\n        grid = cv2.resize(grid, cell_size)\n\n    cv2.putText(\n        grid,\n        \"Click on the web camera to use\",\n        (10, grid.shape[0] - 30),\n        0,\n        0.7,\n        (200, 200, 200),\n        2,\n    )\n\n    cv2.namedWindow(window)\n    cv2.setMouseCallback(window, mouse_callback, (cell_size, grid_cols, cam_frames))\n    cv2.imshow(window, grid)\n\n    while True:\n        key = cv2.waitKey(10)\n\n        if g_selected_cam is not None:\n            break\n\n        if key == 27:\n            break\n\n    cv2.destroyAllWindows()\n\n    if g_selected_cam is not None:\n        return list(cam_frames)[g_selected_cam]\n    else:\n        return list(cam_frames)[0]\n\n\nif __name__ == \"__main__\":\n    with open(\"config.yaml\", \"r\") as f:\n        config = yaml.load(f, Loader=yaml.FullLoader)\n\n    cam_frames = query_cameras(config[\"query_n_cams\"])\n\n    if cam_frames:\n        selected_cam = select_camera(cam_frames)\n        print(f\"Selected camera {selected_cam}\")\n    else:\n        log(\"No cameras are available\")\n"
  },
  {
    "path": "src/dot/commons/camera_utils.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nfrom typing import Any, Callable, Dict, List, Union\n\nimport cv2\nimport numpy as np\n\nfrom .cam.cam import draw_fps\nfrom .utils import TicToc, find_images_from_path\nfrom .video.videocaptureasync import VideoCaptureAsync\n\n\ndef fetch_camera(target: int) -> VideoCaptureAsync:\n    \"\"\"Fetches a VideoCaptureAsync object.\n\n    Args:\n        target (int): Camera ID descriptor.\n\n    Raises:\n        ValueError: If camera ID descriptor is not valid.\n\n    Returns:\n        VideoCaptureAsync: VideoCaptureAsync object.\n    \"\"\"\n    try:\n        return VideoCaptureAsync(target)\n    except RuntimeError:\n        raise ValueError(f\"Camera {target} does not exist.\")\n\n\ndef camera_pipeline(\n    cap: VideoCaptureAsync,\n    source: str,\n    target: int,\n    change_option: Callable[[np.ndarray], None],\n    process_image: Callable[[np.ndarray], np.ndarray],\n    post_process_image: Callable[[np.ndarray], np.ndarray],\n    crop_size: int = 224,\n    show_fps: bool = False,\n    **kwargs: Dict,\n) -> None:\n    \"\"\"Open a webcam stream `target` and performs face-swap based on `source` image by frame.\n\n    Args:\n        cap (VideoCaptureAsync): VideoCaptureAsync object.\n        source (str): Path to source image folder.\n        target (int): Camera ID descriptor.\n        change_option (Callable[[np.ndarray], None]): Set `source` arg as faceswap source image.\n        process_image (Callable[[np.ndarray], np.ndarray]): Performs actual face swap.\n        post_process_image (Callable[[np.ndarray], np.ndarray]): Applies face restoration GPEN to result image.\n        crop_size (int, optional): Face crop size. Defaults to 224.\n        show_fps (bool, optional): Display FPS. 
Defaults to False.\n    \"\"\"\n    source = find_images_from_path(source)\n    print(\"=== Control keys ===\")\n    print(\"1-9: Change avatar\")\n    for i, fname in enumerate(source):\n        print(str(i + 1) + \": \" + fname)\n\n    # Todo describe controls available\n\n    pic_a = source[0]\n\n    img_a_whole = cv2.imread(pic_a)\n    change_option(img_a_whole)\n\n    img_a_align_crop = process_image(img_a_whole)\n    img_a_align_crop = post_process_image(img_a_align_crop)\n\n    cap.start()\n    ret, frame = cap.read()\n    cv2.namedWindow(\"cam\", cv2.WINDOW_GUI_NORMAL)\n    cv2.moveWindow(\"cam\", 500, 250)\n\n    frame_index = -1\n    fps_hist: List = []\n    fps: Union[Any, float] = 0\n\n    show_self = False\n    while True:\n        frame_index += 1\n        ret, frame = cap.read()\n        frame = cv2.flip(frame, 1)\n        if ret:\n            tt = TicToc()\n\n            timing = {\"preproc\": 0, \"predict\": 0, \"postproc\": 0}\n\n            tt.tic()\n\n            key = cv2.waitKey(1)\n            if 48 < key < 58:\n                show_self = False\n                source_image_i = min(key - 49, len(source) - 1)\n                pic_a = source[source_image_i]\n                img_a_whole = cv2.imread(pic_a)\n                change_option(img_a_whole, **kwargs)\n            elif key == ord(\"y\"):\n                show_self = True\n\n            elif key == ord(\"q\"):\n                break\n            elif key == ord(\"i\"):\n                show_fps = not show_fps\n\n            if not show_self:\n                result_frame = process_image(frame, crop_size=crop_size, **kwargs)  # type: ignore\n                timing[\"postproc\"] = tt.toc()\n                result_frame = post_process_image(result_frame, **kwargs)\n\n                if show_fps:\n                    result_frame = draw_fps(np.array(result_frame), fps, timing)\n\n                fps_hist.append(tt.toc(total=True))\n                if len(fps_hist) == 10:\n                    fps = 10 / (sum(fps_hist) / 1000)\n                    fps_hist = []\n\n                cv2.imshow(\"cam\", result_frame)\n\n            else:\n                cv2.imshow(\"cam\", frame)\n\n        else:\n            break\n    cap.stop()\n    cv2.destroyAllWindows()\n"
  },
  {
    "path": "src/dot/commons/model_option.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport os\nfrom abc import ABC, abstractmethod\nfrom typing import Dict, List, Optional, Tuple, Union\n\nimport cv2\nimport torch\n\nfrom ..gpen.face_enhancement import FaceEnhancement\nfrom .camera_utils import camera_pipeline, fetch_camera\nfrom .utils import find_images_from_path, generate_random_file_idx, rand_idx_tuple\nfrom .video.video_utils import video_pipeline\n\n\nclass ModelOption(ABC):\n    def __init__(\n        self,\n        gpen_type=None,\n        gpen_path=\"saved_models/gpen\",\n        use_gpu=True,\n        crop_size=256,\n    ):\n\n        self.gpen_type = gpen_type\n        self.use_gpu = use_gpu\n        self.crop_size = crop_size\n\n        if gpen_type:\n            if gpen_type == \"gpen_512\":\n                model = {\n                    \"name\": \"GPEN-BFR-512\",\n                    \"size\": 512,\n                    \"channel_multiplier\": 2,\n                    \"narrow\": 1,\n                }\n            else:\n                model = {\n                    \"name\": \"GPEN-BFR-256\",\n                    \"size\": 256,\n                    \"channel_multiplier\": 1,\n                    \"narrow\": 0.5,\n                }\n\n            self.face_enhancer = FaceEnhancement(\n                size=model[\"size\"],\n                model=model[\"name\"],\n                channel_multiplier=model[\"channel_multiplier\"],\n                narrow=model[\"narrow\"],\n                use_gpu=self.use_gpu,\n                base_dir=gpen_path,\n            )\n\n    def generate_from_image(\n        self,\n        source: Union[str, List],\n        target: Union[str, List],\n        save_folder: str,\n        limit: Optional[int] = None,\n        swap_case_idx: Optional[Tuple] = (0, 0),\n        **kwargs,\n    ) -> Optional[List[Dict]]:\n        \"\"\"_summary_\n\n        Args:\n            source (Union[str, List]): A list with source images filepaths, or single image filepath.\n            target (Union[str, List]): A list with target images filepaths, or single image filepath.\n            save_folder (str): Output path.\n            limit (Optional[int], optional): Total number of face-swaps. If None,\n            is set to `len(souce)` * `len(target)`. Defaults to None.\n            swap_case_idx (Optional[Tuple], optional): Used as keyword among multiple swaps. Defaults to (0, 0).\n\n        Returns:\n            List[Dict]: Array of successful and rejected metadata dictionaries\n        \"\"\"\n\n        if not save_folder:\n            print(\"Need to define output folder... 
Skipping\")\n            return None\n\n        # source/target can be single file\n        if not isinstance(source, list):\n            source = find_images_from_path(source)\n            target = find_images_from_path(target)\n\n        if not limit:\n            # allow all possible swaps\n            limit = len(source) * len(target)\n\n        swappedDict = {}\n        rejectedDict = {}\n        count = 0\n        rejected_count = 0\n        seen_swaps = []\n        source_len = len(source)\n        target_len = len(target)\n        with torch.no_grad():\n            profiler = kwargs.get(\"profiler\", False)\n            if not profiler:\n                self.create_model(**kwargs)\n\n            while count < limit:\n                rand_swap = rand_idx_tuple(source_len, target_len)\n                while rand_swap in seen_swaps:\n                    rand_swap = rand_idx_tuple(source_len, target_len)\n\n                src_idx = rand_swap[0]\n                tar_idx = rand_swap[1]\n                src_img = source[src_idx]\n                tar_img = target[tar_idx]\n\n                # check if files exits\n                if not os.path.exists(src_img) or not os.path.exists(tar_img):\n                    print(\"source/image file does not exist\", src_img, tar_img)\n                    continue\n\n                # read source image\n                source_image = cv2.imread(src_img)\n                frame = cv2.imread(tar_img)\n\n                try:\n                    self.change_option(source_image)\n                    frame = self.process_image(frame, use_cam=False, ignore_error=False)\n\n                    # check if frame == target_image, if it does, image rejected\n                    frame = self.post_process_image(frame)\n\n                    # flush image to disk\n                    file_idx = generate_random_file_idx(6)\n                    file_name = os.path.join(save_folder, f\"{file_idx:0>6}.jpg\")\n                    while os.path.exists(file_name):\n                        print(f\"Swap id: {file_idx} already exists, generating again.\")\n                        file_idx = generate_random_file_idx(6)\n                        file_name = os.path.join(save_folder, f\"{file_idx:0>6}.jpg\")\n\n                    cv2.imwrite(file_name, frame)\n\n                    # keep track metadata\n                    key = f\"{swap_case_idx[1]}{file_idx:0>6}.jpg\"\n                    swappedDict[key] = {\n                        \"target\": {\"path\": tar_img, \"size\": frame.shape},\n                        \"source\": {\"path\": src_img, \"size\": source_image.shape},\n                    }\n\n                    print(\n                        f\"{count}: Performed face swap {src_img, tar_img} saved to {file_name}\"\n                    )\n\n                    # keep track of previous swaps\n                    seen_swaps.append(rand_swap)\n                    count += 1\n                except Exception as e:\n                    rejectedDict[rejected_count] = {\n                        \"target\": {\"path\": tar_img, \"size\": frame.shape},\n                        \"source\": {\"path\": src_img, \"size\": source_image.shape},\n                    }\n                    rejected_count += 1\n                    print(f\"Cannot perform face swap {src_img, tar_img}\")\n                    print(e)\n            return [swappedDict, rejectedDict]\n\n    def generate_from_camera(\n        self,\n        source: str,\n        target: int,\n        opt_crop_size: int = 
224,\n        show_fps: bool = False,\n        **kwargs: Dict,\n    ) -> None:\n        \"\"\"Invokes `camera_pipeline` main-loop.\n\n        Args:\n            source (str): Source image filepath.\n            target (int): Camera descriptor/ID.\n            opt_crop_size (int, optional): Crop size. Defaults to 224.\n            show_fps (bool, optional): Show FPS. Defaults to False.\n        \"\"\"\n        with torch.no_grad():\n            cap = fetch_camera(target)\n            self.create_model(opt_crop_size=opt_crop_size, **kwargs)\n            camera_pipeline(\n                cap,\n                source,\n                target,\n                self.change_option,\n                self.process_image,\n                self.post_process_image,\n                crop_size=opt_crop_size,\n                show_fps=show_fps,\n            )\n\n    def generate_from_video(\n        self,\n        source: str,\n        target: str,\n        save_folder: str,\n        duration: int,\n        limit: int = None,\n        **kwargs: Dict,\n    ) -> None:\n        \"\"\"Invokes `video_pipeline` main-loop.\n\n        Args:\n            source (str): Source image filepath.\n            target (str): Target video filepath.\n            save_folder (str): Output folder.\n            duration (int): Trim target video in seconds.\n            limit (int, optional): Limit number of video-swaps. Defaults to None.\n        \"\"\"\n        with torch.no_grad():\n            self.create_model(**kwargs)\n            video_pipeline(\n                source,\n                target,\n                save_folder,\n                duration,\n                self.change_option,\n                self.process_image,\n                self.post_process_image,\n                self.crop_size,\n                limit,\n                **kwargs,\n            )\n\n    def post_process_image(self, image, **kwargs):\n        if self.gpen_type:\n            image, orig_faces, enhanced_faces = self.face_enhancer.process(\n                img=image, use_gpu=self.use_gpu\n            )\n\n        return image\n\n    @abstractmethod\n    def change_option(self, image, **kwargs):\n        pass\n\n    @abstractmethod\n    def process_image(self, image, **kwargs):\n        pass\n\n    @abstractmethod\n    def create_model(self, source, target, limit=None, swap_case_idx=0, **kwargs):\n        pass\n"
  },
  {
    "path": "src/dot/commons/pose/head_pose.py",
    "content": "#!/usr/bin/env python3\nimport cv2\nimport mediapipe as mp\nimport numpy as np\n\nmp_face_mesh = mp.solutions.face_mesh\nface_mesh = mp_face_mesh.FaceMesh(\n    min_detection_confidence=0.5, min_tracking_confidence=0.5\n)\nmp_drawing = mp.solutions.drawing_utils\n\n# https://github.com/google/mediapipe/issues/1615\nHEAD_POSE_LANDMARKS = [\n    33,\n    263,\n    1,\n    61,\n    291,\n    199,\n]\n\n\ndef pose_estimation(\n    image: np.array, roll: int = 3, pitch: int = 3, yaw: int = 3\n) -> int:\n    \"\"\"\n    Adjusted from: https://github.com/niconielsen32/ComputerVision/blob/master/headPoseEstimation.py\n    Given an image and desired `roll`, `pitch` and `yaw` angles, the method checks whether\n    estimated head-pose meets requirements.\n\n    Args:\n        image: Image to estimate head pose.\n        roll: Rotation margin in X axis.\n        pitch: Rotation margin in Y axis.\n        yaw: Rotation margin in Z axis.\n\n    Returns:\n        int: Success(0) or Fail(-1).\n    \"\"\"\n\n    results = face_mesh.process(image)\n    img_h, img_w, img_c = image.shape\n    face_3d = []\n    face_2d = []\n    if results.multi_face_landmarks:\n        for face_landmarks in results.multi_face_landmarks:\n            for idx, lm in enumerate(face_landmarks.landmark):\n                if idx in HEAD_POSE_LANDMARKS:\n                    x, y = int(lm.x * img_w), int(lm.y * img_h)\n\n                    # get 2d coordinates\n                    face_2d.append([x, y])\n\n                    # get 3d coordinates\n                    face_3d.append([x, y, lm.z])\n\n            # convert to numpy\n            face_2d = np.array(face_2d, dtype=np.float64)\n            face_3d = np.array(face_3d, dtype=np.float64)\n\n            # camera matrix\n            focal_length = 1 * img_w\n            cam_matrix = np.array(\n                [[focal_length, 9, img_h / 2], [0, focal_length, img_w / 2], [0, 0, 1]]\n            )\n            # distortion\n            dist_matrix = np.zeros((4, 1), dtype=np.float64)\n            # solve pnp\n            success, rot_vec, trans_vec = cv2.solvePnP(\n                face_3d, face_2d, cam_matrix, dist_matrix\n            )\n            # rotational matrix\n            rmat, jac = cv2.Rodrigues(rot_vec)\n            # get angles\n            angles, mtxR, mtxQ, Qx, Qy, Qz = cv2.RQDecomp3x3(rmat)\n\n            # get rotation angles\n            x = angles[0] * 360\n            y = angles[1] * 360\n            z = angles[2] * 360\n\n            # head rotation in X axis\n            if x < -roll or x > roll:\n                return -1\n\n            # head rotation in Y axis\n            if y < -pitch or y > pitch:\n                return -1\n\n            # head rotation in Z axis\n            if z < -yaw or z > yaw:\n                return -1\n\n            return 0\n\n    return -1\n"
  },
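
A rough usage sketch for `pose_estimation`; the image path is a placeholder, and the BGR crop from `cv2.imread` matches how `_crop_and_pose` in `video_utils.py` calls this function.

```python
import cv2

from dot.commons.pose.head_pose import pose_estimation

face_crop = cv2.imread("face.jpg")  # placeholder path; a tight face crop works best
if pose_estimation(image=face_crop, roll=3, pitch=3, yaw=3) == 0:
    print("head pose is within the allowed roll/pitch/yaw margins")
else:
    print("no face detected or the frontal-pose check failed")
```
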
  {
    "path": "src/dot/commons/utils.py",
    "content": "#!/usr/bin/env python3\n\nimport glob\nimport os\nimport random\nimport sys\nimport time\nfrom collections import defaultdict\nfrom typing import Dict, List, Tuple\n\nimport cv2\nimport numpy as np\n\nSEED = 42\nnp.random.seed(SEED)\n\n\ndef log(*args, **kwargs):\n    time_str = f\"{time.time():.6f}\"\n    print(f\"[{time_str}]\", *args, **kwargs)\n\n\ndef info(*args, file=sys.stdout, **kwargs):\n    print(*args, file=file, **kwargs)\n\n\ndef find_images_from_path(path):\n    \"\"\"\n    @arguments:\n        path              (str/int)    : Could be either path(str)\n                                         or a CamID(int)\n    \"\"\"\n    if os.path.isfile(path):\n        return [path]\n\n    try:\n        return int(path)\n    except ValueError:\n        # supported extensions\n        ext = [\"png\", \"jpg\", \"jpeg\"]\n        files = []\n        [files.extend(glob.glob(path + \"**/*.\" + e, recursive=True)) for e in ext]\n\n        return files\n\n\ndef find_files_from_path(path: str, ext: List, filter: str = None):\n    \"\"\"\n    @arguments:\n        path              (str)     Parent directory of files\n        ext               (list)    List of desired file extensions\n    \"\"\"\n    if os.path.isdir(path):\n        files = []\n        [\n            files.extend(glob.glob(path + \"**/*.\" + e, recursive=True)) for e in ext  # type: ignore\n        ]\n        np.random.shuffle(files)\n\n        # filter\n        if filter is not None:\n            files = [file for file in files if filter in file]\n            print(\"Filtered files: \", len(files))\n\n        return files\n\n    return [path]\n\n\ndef expand_bbox(\n    bbox, image_width, image_height, scale=None\n) -> Tuple[int, int, int, int]:\n    if scale is None:\n        raise ValueError(\"scale parameter is none\")\n\n    x1, y1, x2, y2 = bbox\n\n    center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n    size_bb = round(max(x2 - x1, y2 - y1) * scale)\n\n    # Check for out of bounds, x-y top left corner\n    x1 = max(int(center_x - size_bb // 2), 0)\n    y1 = max(int(center_y - size_bb // 2), 0)\n\n    # Check for too big bb size for given x, y\n    size_bb = min(image_width - x1, size_bb)\n    size_bb = min(image_height - y1, size_bb)\n\n    return (x1, y1, x1 + size_bb, y1 + size_bb)\n\n\ndef rand_idx_tuple(source_len, target_len):\n    \"\"\"\n    pick a random tuple for source/target\n    \"\"\"\n    return (random.randrange(source_len), random.randrange(target_len))\n\n\ndef generate_random_file_idx(length):\n    return int(\"\".join([str(random.randint(0, 10)) for _ in range(length)]))\n\n\nclass Tee(object):\n    def __init__(self, filename, mode=\"w\", terminal=sys.stderr):\n        self.file = open(filename, mode, buffering=1)\n        self.terminal = terminal\n\n    def __del__(self):\n        self.file.close()\n\n    def write(self, *args, **kwargs):\n        log(*args, file=self.file, **kwargs)\n        log(*args, file=self.terminal, **kwargs)\n\n    def __call__(self, *args, **kwargs):\n        return self.write(*args, **kwargs)\n\n    def flush(self):\n        self.file.flush()\n\n\nclass Logger:\n    def __init__(self, filename, verbose=True):\n        self.tee = Tee(filename)\n        self.verbose = verbose\n\n    def __call__(self, *args, important=False, **kwargs):\n        if not self.verbose and not important:\n            return\n\n        self.tee(*args, **kwargs)\n\n\nclass Once:\n    _id: Dict = {}\n\n    def __init__(self, what, who=log, per=1e12):\n        \"\"\"Do who(what) 
once per seconds.\n        what: args for who\n        who: callable\n        per: frequency in seconds.\n        \"\"\"\n        assert callable(who)\n        now = time.time()\n        if what not in Once._id or now - Once._id[what] > per:\n            who(what)\n            Once._id[what] = now\n\n\nclass TicToc:\n    def __init__(self):\n        self.t = None\n        self.t_init = time.time()\n\n    def tic(self):\n        self.t = time.time()\n\n    def toc(self, total=False):\n        if total:\n            return (time.time() - self.t_init) * 1000\n\n        assert self.t, \"You forgot to call tic()\"\n        return (time.time() - self.t) * 1000\n\n    def tocp(self, str):\n        t = self.toc()\n        log(f\"{str} took {t:.4f}ms\")\n        return t\n\n\nclass AccumDict:\n    def __init__(self, num_f=3):\n        self.d = defaultdict(list)\n        self.num_f = num_f\n\n    def add(self, k, v):\n        self.d[k] += [v]\n\n    def __dict__(self):\n        return self.d\n\n    def __getitem__(self, key):\n        return self.d[key]\n\n    def __str__(self):\n        s = \"\"\n        for k in self.d:\n            if not self.d[k]:\n                continue\n            cur = self.d[k][-1]\n            avg = np.mean(self.d[k])\n            format_str = \"{:.%df}\" % self.num_f\n            cur_str = format_str.format(cur)\n            avg_str = format_str.format(avg)\n            s += f\"{k} {cur_str} ({avg_str})\\t\\t\"\n        return s\n\n    def __repr__(self):\n        return self.__str__()\n\n\ndef clamp(value, min_value, max_value):\n    return max(min(value, max_value), min_value)\n\n\ndef crop(img, p=0.7, offset_x=0, offset_y=0):\n    h, w = img.shape[:2]\n    x = int(min(w, h) * p)\n    _l = (w - x) // 2\n    r = w - _l\n    u = (h - x) // 2\n    d = h - u\n\n    offset_x = clamp(offset_x, -_l, w - r)\n    offset_y = clamp(offset_y, -u, h - d)\n\n    _l += offset_x\n    r += offset_x\n    u += offset_y\n    d += offset_y\n\n    return img[u:d, _l:r], (offset_x, offset_y)\n\n\ndef pad_img(img, target_size, default_pad=0):\n    sh, sw = img.shape[:2]\n    w, h = target_size\n    pad_w, pad_h = default_pad, default_pad\n    if w / h > 1:\n        pad_w += int(sw * (w / h) - sw) // 2\n    else:\n        pad_h += int(sh * (h / w) - sh) // 2\n    out = np.pad(img, [[pad_h, pad_h], [pad_w, pad_w], [0, 0]], \"constant\")\n    return out\n\n\ndef resize(img, size, version=\"cv\"):\n    return cv2.resize(img, size)\n\n\ndef determine_path():\n    \"\"\"\n    Find the script path\n    \"\"\"\n    try:\n        root = __file__\n        if os.path.islink(root):\n            root = os.path.realpath(root)\n\n        return os.path.dirname(os.path.abspath(root))\n    except Exception as e:\n        print(e)\n        print(\"I'm sorry, but something is wrong.\")\n        print(\"There is no __file__ variable. Please contact the author.\")\n        sys.exit()\n"
  },
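
A small, self-contained sketch of the helpers above (`TicToc`, `crop`, `resize`, `expand_bbox`); the frame is dummy data and the box coordinates are arbitrary.

```python
import numpy as np

from dot.commons.utils import TicToc, crop, expand_bbox, resize

timer = TicToc()
timer.tic()

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # dummy 640x480 frame
cropped, (off_x, off_y) = crop(frame, p=0.7)      # centre crop at 70% of the short side
square = resize(cropped, (224, 224))              # plain cv2.resize under the hood

# grow a 100x100 face box by 2x while keeping it inside the frame bounds
bbox = expand_bbox((100, 100, 200, 200), 640, 480, scale=2.0)

timer.tocp("preprocessing")                       # logs "preprocessing took X.XXXXms"
print(square.shape, bbox, (off_x, off_y))
```
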
  {
    "path": "src/dot/commons/video/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/commons/video/video_utils.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport os\nimport random\nfrom typing import Callable, Dict, Union\n\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nfrom mediapipe.python.solutions.drawing_utils import _normalized_to_pixel_coordinates\n\nfrom ..pose.head_pose import pose_estimation\nfrom ..utils import expand_bbox, find_files_from_path\n\nmp_face = mp.solutions.face_detection.FaceDetection(\n    model_selection=0,  # model selection\n    min_detection_confidence=0.5,  # confidence threshold\n)\n\n\ndef _crop_and_pose(\n    image: np.array, estimate_pose: bool = False\n) -> Union[np.array, int]:\n    \"\"\"Crops face of `image` and estimates head pose.\n\n    Args:\n        image (np.array): Image to be cropped and estimate pose.\n        estimate_pose (Boolean, optional): Enables pose estimation. Defaults to False.\n\n    Returns:\n        Union[np.array,int]: Cropped image or -1.\n    \"\"\"\n\n    image_rows, image_cols, _ = image.shape\n    results = mp_face.process(image)\n    if results.detections is None:\n        return -1\n\n    detection = results.detections[0]\n    location = detection.location_data\n    relative_bounding_box = location.relative_bounding_box\n    rect_start_point = _normalized_to_pixel_coordinates(\n        relative_bounding_box.xmin, relative_bounding_box.ymin, image_cols, image_rows\n    )\n    rect_end_point = _normalized_to_pixel_coordinates(\n        min(relative_bounding_box.xmin + relative_bounding_box.width, 1.0),\n        min(relative_bounding_box.ymin + relative_bounding_box.height, 1.0),\n        image_cols,\n        image_rows,\n    )\n\n    xleft, ytop = rect_start_point\n    xright, ybot = rect_end_point\n\n    xleft, ytop, xright, ybot = expand_bbox(\n        (xleft, ytop, xright, ybot), image_rows, image_cols, 2.0\n    )\n\n    try:\n        crop_image = image[ytop:ybot, xleft:xright]\n        if estimate_pose:\n            if pose_estimation(image=crop_image, roll=3, pitch=3, yaw=3) != 0:\n                return -1\n\n        return cv2.flip(crop_image, 1)\n    except Exception as e:\n        print(e)\n        return -1\n\n\ndef video_pipeline(\n    source: str,\n    target: str,\n    save_folder: str,\n    duration: int,\n    change_option: Callable[[np.ndarray], None],\n    process_image: Callable[[np.ndarray], np.ndarray],\n    post_process_image: Callable[[np.ndarray], np.ndarray],\n    crop_size: int = 224,\n    limit: int = None,\n    **kwargs: Dict,\n) -> None:\n    \"\"\"Process input video file `target` by frame and performs face-swap based on first image\n    found in `source` path folder. Uses cv2.VideoWriter to flush the resulted video on disk.\n    Trimming video is done as: trimmed = fps * duration.\n\n    Args:\n        source (str): Path to source image folder.\n        target (str): Path to target video folder.\n        save_folder (str): Output folder path.\n        duration (int): Crop target video in seconds.\n        change_option (Callable[[np.ndarray], None]): Set `source` arg as faceswap source image.\n        process_image (Callable[[np.ndarray], np.ndarray]): Performs actual face swap.\n        post_process_image (Callable[[np.ndarray], np.ndarray]): Applies face restoration GPEN to result image.\n        head_pose (bool): Estimates head pose before swap. Used by Avatarify.\n        crop_size (int, optional): Face crop size. 
Defaults to 224.\n        limit (int, optional): Limit number of video-swaps. Defaults to None.\n    \"\"\"\n    head_pose = kwargs.get(\"head_pose\", False)\n    source_imgs = find_files_from_path(source, [\"jpg\", \"png\", \"jpeg\"], filter=None)\n    target_videos = find_files_from_path(target, [\"avi\", \"mp4\", \"mov\", \"MOV\"])\n    if not source_imgs or not target_videos:\n        print(\"Could not find any source/target files\")\n        return\n\n    # unique combinations of source/target\n    swaps_combination = [(im, vi) for im in source_imgs for vi in target_videos]\n    # randomize list\n    random.shuffle(swaps_combination)\n    if limit:\n        swaps_combination = swaps_combination[:limit]\n\n    print(\"Total source images: \", len(source_imgs))\n    print(\"Total target videos: \", len(target_videos))\n    print(\"Total number of face-swaps: \", len(swaps_combination))\n\n    # iterate on each source-target pair\n    for (source, target) in swaps_combination:\n        img_a_whole = cv2.imread(source)\n        img_a_whole = _crop_and_pose(img_a_whole, estimate_pose=head_pose)\n        if isinstance(img_a_whole, int):\n            print(\n                f\"Image {source} failed on face detection or pose estimation requirements haven't met.\"\n            )\n            continue\n\n        change_option(img_a_whole)\n\n        img_a_align_crop = process_image(img_a_whole)\n        img_a_align_crop = post_process_image(img_a_align_crop)\n\n        # video handle\n        cap = cv2.VideoCapture(target)\n\n        fps = int(cap.get(cv2.CAP_PROP_FPS))\n\n        frame_width = int(cap.get(3))\n        frame_height = int(cap.get(4))\n\n        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n        # trim original video length\n        if duration and (fps * int(duration)) < total_frames:\n            total_frames = fps * int(duration)\n\n        # result video is saved in `save_folder` with name combining source/target files.\n        source_base_name = os.path.basename(source)\n        target_base_name = os.path.basename(target)\n        output_file = f\"{os.path.splitext(source_base_name)[0]}_{os.path.splitext(target_base_name)[0]}.mp4\"\n        output_file = os.path.join(save_folder, output_file)\n\n        fourcc = cv2.VideoWriter_fourcc(\"X\", \"V\", \"I\", \"D\")\n        video_writer = cv2.VideoWriter(\n            output_file, fourcc, fps, (frame_width, frame_height), True\n        )\n        print(\n            f\"Source: {source} \\nTarget: {target} \\nOutput: {output_file} \\nFPS: {fps} \\nTotal frames: {total_frames}\"\n        )\n\n        # process each frame individually\n        for _ in range(total_frames):\n            ret, frame = cap.read()\n            if ret is True:\n                frame = cv2.flip(frame, 1)\n                result_frame = process_image(frame, use_cam=False, crop_size=crop_size, **kwargs)  # type: ignore\n                result_frame = post_process_image(result_frame, **kwargs)\n                video_writer.write(result_frame)\n            else:\n                break\n\n        cap.release()\n        video_writer.release()\n"
  },
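
An illustrative call to `video_pipeline` with pass-through callbacks, purely to show the callable contract; real runs pass the bound methods of a `ModelOption` subclass, and the folder paths below are placeholders.

```python
from dot.commons.video.video_utils import video_pipeline

video_pipeline(
    source="data/source_faces/",     # folder of jpg/png/jpeg source faces (placeholder)
    target="data/target_videos/",    # folder of avi/mp4/mov target videos (placeholder)
    save_folder="results/",
    duration=5,                      # keep only fps * 5 frames of each target
    change_option=lambda image, **kw: None,        # would set the swap source
    process_image=lambda image, **kw: image,       # would perform the actual swap
    post_process_image=lambda image, **kw: image,  # would apply GPEN restoration
    crop_size=224,
    limit=1,                         # only one random source/target combination
    head_pose=False,
)
```
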
  {
    "path": "src/dot/commons/video/videocaptureasync.py",
    "content": "#!/usr/bin/env python3\n# https://github.com/gilbertfrancois/video-capture-async\n\nimport threading\nimport time\n\nimport cv2\n\nWARMUP_TIMEOUT = 10.0\n\n\nclass VideoCaptureAsync:\n    def __init__(self, src=0, width=640, height=480):\n        self.src = src\n\n        self.cap = cv2.VideoCapture(self.src)\n        if not self.cap.isOpened():\n            raise RuntimeError(\"Cannot open camera\")\n\n        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)\n        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)\n        self.grabbed, self.frame = self.cap.read()\n        self.started = False\n        self.read_lock = threading.Lock()\n\n    def set(self, var1, var2):\n        self.cap.set(var1, var2)\n\n    def isOpened(self):\n        return self.cap.isOpened()\n\n    def start(self):\n        if self.started:\n            print(\"[!] Asynchronous video capturing has already been started.\")\n            return None\n        self.started = True\n        self.thread = threading.Thread(target=self.update, args=(), daemon=True)\n        self.thread.start()\n\n        # (warmup) wait for the first successfully grabbed frame\n        warmup_start_time = time.time()\n        while not self.grabbed:\n            warmup_elapsed_time = time.time() - warmup_start_time\n            if warmup_elapsed_time > WARMUP_TIMEOUT:\n                raise RuntimeError(\n                    f\"Failed to succesfully grab frame from \"\n                    f\"the camera (timeout={WARMUP_TIMEOUT}s). \"\n                    f\"Try to restart.\"\n                )\n\n            time.sleep(0.5)\n\n        return self\n\n    def update(self):\n        while self.started:\n            grabbed, frame = self.cap.read()\n            if not grabbed or frame is None or frame.size == 0:\n                continue\n            with self.read_lock:\n                self.grabbed = grabbed\n                self.frame = frame\n\n    def read(self):\n        while True:\n            with self.read_lock:\n                frame = self.frame.copy()\n                grabbed = self.grabbed\n            break\n        return grabbed, frame\n\n    def stop(self):\n        self.started = False\n        self.thread.join()\n\n    def __exit__(self, exec_type, exc_value, traceback):\n        self.cap.release()\n"
  },
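
A hedged usage sketch for the asynchronous capture wrapper; camera index 0 and the preview loop are assumptions, not part of the repo.

```python
import cv2

from dot.commons.video.videocaptureasync import VideoCaptureAsync

cap = VideoCaptureAsync(src=0, width=640, height=480).start()  # blocks until a first frame is grabbed
try:
    while True:
        grabbed, frame = cap.read()        # always returns the most recent frame
        if grabbed:
            cv2.imshow("preview", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
finally:
    cap.stop()                             # stop the background reader thread
    cv2.destroyAllWindows()
```
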
  {
    "path": "src/dot/dot.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nfrom pathlib import Path\nfrom typing import List, Optional, Union\n\nfrom .commons import ModelOption\nfrom .faceswap_cv2 import FaceswapCVOption\nfrom .fomm import FOMMOption\nfrom .simswap import SimswapOption\n\nAVAILABLE_SWAP_TYPES = [\"simswap\", \"fomm\", \"faceswap_cv2\"]\n\n\nclass DOT:\n    \"\"\"Main DOT Interface.\n\n    Supported Engines:\n        - `simswap`\n        - `fomm`\n        - `faceswap_cv2`\n\n    Attributes:\n        use_cam (bool): Use camera descriptor and pipeline.\n        use_video (bool): Use video-swap pipeline.\n        use_image (bool): Use image-swap pipeline.\n        save_folder (str): Output folder to store face-swaps and metadata file when `use_cam` is False.\n    \"\"\"\n\n    def __init__(\n        self,\n        use_video: bool = False,\n        use_image: bool = False,\n        save_folder: str = None,\n        *args,\n        **kwargs,\n    ):\n        \"\"\"Constructor method.\n\n        Args:\n            use_video (bool, optional): if True, use video-swap pipeline. Defaults to False.\n            use_image (bool, optional): if True, use image-swap pipeline. Defaults to False.\n            save_folder (str, optional): Output folder to store face-swaps and metadata file when `use_cam` is False.\n                Defaults to None.\n        \"\"\"\n        # init\n        self.use_video = use_video\n        self.save_folder = save_folder\n        self.use_image = use_image\n\n        # additional attributes\n        self.use_cam = (not use_video) and (not use_image)\n\n        # create output folder\n        if self.save_folder and not Path(self.save_folder).exists():\n            Path(self.save_folder).mkdir(parents=True, exist_ok=True)\n\n    def build_option(\n        self,\n        swap_type: str,\n        use_gpu: bool,\n        gpen_type: str,\n        gpen_path: str,\n        crop_size: int,\n        **kwargs,\n    ) -> ModelOption:\n        \"\"\"Build DOT option based on swap type.\n\n        Args:\n            swap_type (str): Swap type engine.\n            use_gpu (bool): If True, use GPU.\n            gpen_type (str): GPEN type.\n            gpen_path (str): path to GPEN model checkpoint.\n            crop_size (int): crop size.\n\n        Returns:\n            ModelOption: DOT option.\n        \"\"\"\n        if swap_type not in AVAILABLE_SWAP_TYPES:\n            raise ValueError(f\"Invalid swap type: {swap_type}\")\n\n        option: ModelOption = None\n        if swap_type == \"simswap\":\n            option = self.simswap(\n                use_gpu=use_gpu,\n                gpen_type=gpen_type,\n                gpen_path=gpen_path,\n                crop_size=crop_size,\n            )\n        elif swap_type == \"fomm\":\n            option = self.fomm(\n                use_gpu=use_gpu, gpen_type=gpen_type, gpen_path=gpen_path, **kwargs\n            )\n        elif swap_type == \"faceswap_cv2\":\n            option = self.faceswap_cv2(\n                use_gpu=use_gpu, gpen_type=gpen_type, gpen_path=gpen_path\n            )\n\n        return option\n\n    def generate(\n        self,\n        option: ModelOption,\n        source: str,\n        target: Union[int, str],\n        show_fps: bool = False,\n        duration: int = None,\n        **kwargs,\n    ) -> Optional[List]:\n        \"\"\"Differentiates among different swap options.\n\n        Available swap 
options:\n            - `camera`\n            - `image`\n            - `video`\n\n        Args:\n            option (ModelOption): Swap engine class.\n            source (str): File path of source image.\n            target (Union[int, str]): Either an `int` camera descriptor or a target image/video file path.\n            show_fps (bool, optional): Displays FPS during camera pipeline. Defaults to False.\n            duration (int, optional): Used to trim the target video in seconds. Defaults to None.\n\n        Returns:\n            Optional[List]: None when using camera or video, otherwise metadata of successful and rejected face-swaps.\n        \"\"\"\n        if self.use_cam:\n            option.generate_from_camera(\n                source, int(target), show_fps=show_fps, **kwargs\n            )\n            return None\n        if isinstance(target, str):\n            if self.use_video:\n                option.generate_from_video(\n                    source, target, self.save_folder, duration, **kwargs\n                )\n                return None\n            elif self.use_image:\n                [swappedDict, rejectedDict] = option.generate_from_image(\n                    source, target, self.save_folder, **kwargs\n                )\n                return [swappedDict, rejectedDict]\n            else:\n                return None\n        else:\n            return None\n\n    def simswap(\n        self,\n        use_gpu: bool,\n        gpen_type: str,\n        gpen_path: str,\n        crop_size: int = 224,\n        use_mask: bool = True,\n    ) -> SimswapOption:\n        \"\"\"Build Simswap Option.\n\n        Args:\n            use_gpu (bool): If True, use GPU.\n            gpen_type (str): GPEN type.\n            gpen_path (str): path to GPEN model checkpoint.\n            crop_size (int, optional): crop size. Defaults to 224.\n            use_mask (bool, optional): If True, use mask. Defaults to True.\n\n        Returns:\n            SimswapOption: Simswap Option.\n        \"\"\"\n        return SimswapOption(\n            use_gpu=use_gpu,\n            gpen_type=gpen_type,\n            gpen_path=gpen_path,\n            crop_size=crop_size,\n            use_mask=use_mask,\n        )\n\n    def faceswap_cv2(\n        self, use_gpu: bool, gpen_type: str, gpen_path: str, crop_size: int = 256\n    ) -> FaceswapCVOption:\n        \"\"\"Build FaceswapCV Option.\n\n        Args:\n            use_gpu (bool): If True, use GPU.\n            gpen_type (str): GPEN type.\n            gpen_path (str): path to GPEN model checkpoint.\n            crop_size (int, optional): crop size. Defaults to 256.\n\n        Returns:\n            FaceswapCVOption: FaceswapCV Option.\n        \"\"\"\n        return FaceswapCVOption(\n            use_gpu=use_gpu,\n            gpen_type=gpen_type,\n            gpen_path=gpen_path,\n            crop_size=crop_size,\n        )\n\n    def fomm(\n        self,\n        use_gpu: bool,\n        gpen_type: str,\n        gpen_path: str,\n        crop_size: int = 256,\n        **kwargs,\n    ) -> FOMMOption:\n        \"\"\"Build FOMM Option.\n\n        Args:\n            use_gpu (bool): If True, use GPU.\n            gpen_type (str): GPEN type.\n            gpen_path (str): path to GPEN model checkpoint.\n            crop_size (int, optional): crop size. Defaults to 256.\n\n        Returns:\n            FOMMOption: FOMM Option.\n        \"\"\"\n        return FOMMOption(\n            use_gpu=use_gpu,\n            gpen_type=gpen_type,\n            gpen_path=gpen_path,\n            crop_size=crop_size,\n            offline=self.use_video,\n        )\n"
  },
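
A rough end-to-end sketch of the `DOT` interface above for an offline image swap. The data folders are placeholders, and forwarding `model_path` through `generate(**kwargs)` down to `FaceswapCVOption.create_model` is an assumption based on how the camera and video pipelines forward their kwargs.

```python
from dot.dot import DOT

_dot = DOT(use_image=True, save_folder="results/")
option = _dot.build_option(
    swap_type="faceswap_cv2",
    use_gpu=False,
    gpen_type=None,   # skip GPEN face restoration
    gpen_path=None,
    crop_size=256,
)
metadata = _dot.generate(
    option,
    source="data/source_faces/",    # placeholder folders
    target="data/target_images/",
    # assumed kwarg: dlib 68-landmark checkpoint consumed by FaceswapCVOption.create_model
    model_path="saved_models/faceswap_cv/shape_predictor_68_face_landmarks.dat",
)
print(metadata)  # [swapped, rejected] metadata dictionaries when use_image=True
```
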
  {
    "path": "src/dot/faceswap_cv2/__init__.py",
    "content": "#!/usr/bin/env python3\n\nfrom .option import FaceswapCVOption\n\n__all__ = [\"FaceswapCVOption\"]\n"
  },
  {
    "path": "src/dot/faceswap_cv2/generic.py",
    "content": "#!/usr/bin/env python3\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport scipy.spatial as spatial\r\n\r\n\r\ndef bilinear_interpolate(img, coords):\r\n    \"\"\"\r\n    Interpolates over every image channel.\r\n    https://en.wikipedia.org/wiki/Bilinear_interpolation\r\n    :param img: max 3 channel image\r\n    :param coords: 2 x _m_ array. 1st row = xcoords, 2nd row = ycoords\r\n    :returns: array of interpolated pixels with same shape as coords\r\n    \"\"\"\r\n    int_coords = np.int32(coords)\r\n    x0, y0 = int_coords\r\n    dx, dy = coords - int_coords\r\n\r\n    # 4 Neighour pixels\r\n    q11 = img[y0, x0]\r\n    q21 = img[y0, x0 + 1]\r\n    q12 = img[y0 + 1, x0]\r\n    q22 = img[y0 + 1, x0 + 1]\r\n\r\n    btm = q21.T * dx + q11.T * (1 - dx)\r\n    top = q22.T * dx + q12.T * (1 - dx)\r\n    inter_pixel = top * dy + btm * (1 - dy)\r\n\r\n    return inter_pixel.T\r\n\r\n\r\ndef grid_coordinates(points):\r\n    \"\"\"\r\n    x,y grid coordinates within the ROI of supplied points.\r\n    :param points: points to generate grid coordinates\r\n    :returns: array of (x, y) coordinates\r\n    \"\"\"\r\n    xmin = np.min(points[:, 0])\r\n    xmax = np.max(points[:, 0]) + 1\r\n    ymin = np.min(points[:, 1])\r\n    ymax = np.max(points[:, 1]) + 1\r\n\r\n    return np.asarray(\r\n        [(x, y) for y in range(ymin, ymax) for x in range(xmin, xmax)], np.uint32\r\n    )\r\n\r\n\r\ndef process_warp(src_img, result_img, tri_affines, dst_points, delaunay):\r\n    \"\"\"\r\n    Warp each triangle from the src_image only within the\r\n    ROI of the destination image (points in dst_points).\r\n    \"\"\"\r\n    roi_coords = grid_coordinates(dst_points)\r\n    # indices to vertices. -1 if pixel is not in any triangle\r\n    roi_tri_indices = delaunay.find_simplex(roi_coords)\r\n\r\n    for simplex_index in range(len(delaunay.simplices)):\r\n        coords = roi_coords[roi_tri_indices == simplex_index]\r\n        num_coords = len(coords)\r\n        out_coords = np.dot(\r\n            tri_affines[simplex_index], np.vstack((coords.T, np.ones(num_coords)))\r\n        )\r\n        x, y = coords.T\r\n        result_img[y, x] = bilinear_interpolate(src_img, out_coords)\r\n\r\n    return None\r\n\r\n\r\ndef triangular_affine_matrices(vertices, src_points, dst_points):\r\n    \"\"\"\r\n    Calculate the affine transformation matrix for each\r\n    triangle (x,y) vertex from dst_points to src_points.\r\n    :param vertices: array of triplet indices to corners of triangle\r\n    :param src_points: array of [x, y] points to landmarks for source image\r\n    :param dst_points: array of [x, y] points to landmarks for destination image\r\n    :returns: 2 x 3 affine matrix transformation for a triangle\r\n    \"\"\"\r\n    ones = [1, 1, 1]\r\n    for tri_indices in vertices:\r\n        src_tri = np.vstack((src_points[tri_indices, :].T, ones))\r\n        dst_tri = np.vstack((dst_points[tri_indices, :].T, ones))\r\n        mat = np.dot(src_tri, np.linalg.inv(dst_tri))[:2, :]\r\n        yield mat\r\n\r\n\r\ndef warp_image_3d(src_img, src_points, dst_points, dst_shape, dtype=np.uint8):\r\n    rows, cols = dst_shape[:2]\r\n    result_img = np.zeros((rows, cols, 3), dtype=dtype)\r\n\r\n    delaunay = spatial.Delaunay(dst_points)\r\n    tri_affines = np.asarray(\r\n        list(triangular_affine_matrices(delaunay.simplices, src_points, dst_points))\r\n    )\r\n\r\n    process_warp(src_img, result_img, tri_affines, dst_points, delaunay)\r\n\r\n    return result_img\r\n\r\n\r\ndef 
transformation_from_points(points1, points2):\r\n    points1 = points1.astype(np.float64)\r\n    points2 = points2.astype(np.float64)\r\n\r\n    c1 = np.mean(points1, axis=0)\r\n    c2 = np.mean(points2, axis=0)\r\n    points1 -= c1\r\n    points2 -= c2\r\n\r\n    s1 = np.std(points1)\r\n    s2 = np.std(points2)\r\n    points1 /= s1\r\n    points2 /= s2\r\n\r\n    U, S, Vt = np.linalg.svd(np.dot(points1.T, points2))\r\n    R = (np.dot(U, Vt)).T\r\n\r\n    return np.vstack(\r\n        [\r\n            np.hstack([s2 / s1 * R, (c2.T - np.dot(s2 / s1 * R, c1.T))[:, np.newaxis]]),\r\n            np.array([[0.0, 0.0, 1.0]]),\r\n        ]\r\n    )\r\n\r\n\r\ndef warp_image_2d(im, M, dshape):\r\n    output_im = np.zeros(dshape, dtype=im.dtype)\r\n    cv2.warpAffine(\r\n        im,\r\n        M[:2],\r\n        (dshape[1], dshape[0]),\r\n        dst=output_im,\r\n        borderMode=cv2.BORDER_TRANSPARENT,\r\n        flags=cv2.WARP_INVERSE_MAP,\r\n    )\r\n\r\n    return output_im\r\n\r\n\r\ndef mask_from_points(size, points, erode_flag=1):\r\n    radius = 10  # kernel size\r\n    kernel = np.ones((radius, radius), np.uint8)\r\n\r\n    mask = np.zeros(size, np.uint8)\r\n    cv2.fillConvexPoly(mask, cv2.convexHull(points), 255)\r\n    if erode_flag:\r\n        mask = cv2.erode(mask, kernel, iterations=1)\r\n\r\n    return mask\r\n\r\n\r\ndef correct_colours(im1, im2, landmarks1):\r\n    COLOUR_CORRECT_BLUR_FRAC = 0.75\r\n    LEFT_EYE_POINTS = list(range(42, 48))\r\n    RIGHT_EYE_POINTS = list(range(36, 42))\r\n\r\n    blur_amount = COLOUR_CORRECT_BLUR_FRAC * np.linalg.norm(\r\n        np.mean(landmarks1[LEFT_EYE_POINTS], axis=0)\r\n        - np.mean(landmarks1[RIGHT_EYE_POINTS], axis=0)\r\n    )\r\n    blur_amount = int(blur_amount)\r\n    if blur_amount % 2 == 0:\r\n        blur_amount += 1\r\n    im1_blur = cv2.GaussianBlur(im1, (blur_amount, blur_amount), 0)\r\n    im2_blur = cv2.GaussianBlur(im2, (blur_amount, blur_amount), 0)\r\n\r\n    # Avoid divide-by-zero errors.\r\n    im2_blur = im2_blur.astype(int)\r\n    im2_blur += 128 * (im2_blur <= 1)\r\n\r\n    result = (\r\n        im2.astype(np.float64)\r\n        * im1_blur.astype(np.float64)\r\n        / im2_blur.astype(np.float64)\r\n    )\r\n    result = np.clip(result, 0, 255).astype(np.uint8)\r\n\r\n    return result\r\n\r\n\r\ndef apply_mask(img, mask):\r\n    \"\"\"\r\n    Apply mask to supplied image.\r\n    :param img: max 3 channel image\r\n    :param mask: [0-255] values in mask\r\n    :returns: new image with mask applied\r\n    \"\"\"\r\n    masked_img = cv2.bitwise_and(img, img, mask=mask)\r\n\r\n    return masked_img\r\n"
  },
  {
    "path": "src/dot/faceswap_cv2/option.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport dlib\nimport numpy as np\n\nfrom ..commons import ModelOption\nfrom ..commons.utils import crop, resize\nfrom ..faceswap_cv2.swap import Swap\n\n\nclass FaceswapCVOption(ModelOption):\n    def __init__(\n        self,\n        use_gpu=True,\n        use_mask=False,\n        crop_size=224,\n        gpen_type=None,\n        gpen_path=None,\n    ):\n        super(FaceswapCVOption, self).__init__(\n            gpen_type=gpen_type,\n            use_gpu=use_gpu,\n            crop_size=crop_size,\n            gpen_path=gpen_path,\n        )\n        self.frame_proportion = 0.9\n        self.frame_offset_x = 0\n        self.frame_offset_y = 0\n\n    def create_model(self, model_path, **kwargs) -> None:  # type: ignore\n\n        self.model = Swap(\n            predictor_path=model_path, end=68, warp_2d=False, correct_color=True\n        )\n\n        self.detector = dlib.get_frontal_face_detector()\n\n    def change_option(self, image, **kwargs):\n        self.source_image = image\n        self.src_landmarks, self.src_shape, self.src_face = self.model._process_face(\n            image\n        )\n\n    def process_image(\n        self, image, use_cam=True, ignore_error=True, **kwargs\n    ) -> np.array:\n        frame = image[..., ::-1]\n\n        if use_cam:\n            frame, (self.frame_offset_x, self.frame_offset_y) = crop(\n                frame,\n                p=self.frame_proportion,\n                offset_x=self.frame_offset_x,\n                offset_y=self.frame_offset_y,\n            )\n            frame = resize(frame, (self.crop_size, self.crop_size))[..., :3]\n            frame = cv2.flip(frame, 1)\n\n        faces = self.detector(frame[..., ::-1])\n        if len(faces) > 0:\n            try:\n                swapped_img = self.model.apply_face_swap(\n                    source_image=self.source_image,\n                    target_image=frame,\n                    save_path=None,\n                    src_landmarks=self.src_landmarks,\n                    src_shape=self.src_shape,\n                    src_face=self.src_face,\n                )\n\n                swapped_img = np.array(swapped_img)[..., ::-1].copy()\n            except Exception as e:\n                if ignore_error:\n                    print(e)\n                    swapped_img = frame[..., ::-1].copy()\n                else:\n                    raise e\n        else:\n            swapped_img = frame[..., ::-1].copy()\n\n        return swapped_img\n"
  },
  {
    "path": "src/dot/faceswap_cv2/swap.py",
    "content": "#!/usr/bin/env python3\r\n\r\nfrom typing import Any, Dict\r\n\r\nimport cv2\r\nimport dlib\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\nfrom .generic import (\r\n    apply_mask,\r\n    correct_colours,\r\n    mask_from_points,\r\n    transformation_from_points,\r\n    warp_image_2d,\r\n    warp_image_3d,\r\n)\r\n\r\n# define globals\r\nCACHED_PREDICTOR_PATH = \"saved_models/faceswap_cv/shape_predictor_68_face_landmarks.dat\"\r\n\r\n\r\nclass Swap:\r\n    def __init__(\r\n        self,\r\n        predictor_path: str = None,\r\n        warp_2d: bool = True,\r\n        correct_color: bool = True,\r\n        end: int = 48,\r\n    ):\r\n        \"\"\"\r\n        Face Swap.\r\n        @description:\r\n            perform face swapping using Poisson blending\r\n        @arguments:\r\n            predictor_path: (str) path to 68-point facial landmark detector\r\n            warp_2d: (bool) if True, perform 2d warping for swapping\r\n            correct_color: (bool) if True, color correct swap output image\r\n            end: (int) last facial landmark point for face swap\r\n        \"\"\"\r\n        if not predictor_path:\r\n            predictor_path = CACHED_PREDICTOR_PATH\r\n\r\n        # init\r\n        self.predictor_path = predictor_path\r\n        self.warp_2d = warp_2d\r\n        self.correct_color = correct_color\r\n        self.end = end\r\n\r\n        # Load dlib models\r\n        self.detector = dlib.get_frontal_face_detector()\r\n        self.predictor = dlib.shape_predictor(self.predictor_path)\r\n\r\n    def apply_face_swap(self, source_image, target_image, save_path=None, **kwargs):\r\n        \"\"\"\r\n        apply face swapping from source to target image\r\n        @arguments:\r\n            source_image: (PIL or str) source PIL image or path to source image\r\n            target_image: (PIL or str) target PIL image or path to target image\r\n            save_path: (str) path to save face swap output image (optional)\r\n            **kwargs: Extra argument for specifying the source and target landmarks, shape and face\r\n        @returns:\r\n            faceswap_output_image: (PIL) face swap output image\r\n        \"\"\"\r\n        # load image if path given, else convert to cv2 format\r\n        if isinstance(source_image, str):\r\n            source_image_cv2 = cv2.imread(source_image)\r\n        else:\r\n            source_image_cv2 = cv2.cvtColor(np.array(source_image), cv2.COLOR_RGB2BGR)\r\n        if isinstance(target_image, str):\r\n            target_image_cv2 = cv2.imread(target_image)\r\n        else:\r\n            target_image_cv2 = cv2.cvtColor(np.array(target_image), cv2.COLOR_RGB2BGR)\r\n\r\n        # process source image\r\n        try:\r\n            src_landmarks = kwargs[\"src_landmarks\"]\r\n            src_shape = kwargs[\"src_shape\"]\r\n            src_face = kwargs[\"src_face\"]\r\n        except Exception as e:\r\n            print(e)\r\n            src_landmarks, src_shape, src_face = self._process_face(source_image_cv2)\r\n\r\n        # process target image\r\n        trg_landmarks, trg_shape, trg_face = self._process_face(target_image_cv2)\r\n\r\n        # get target face dimensions\r\n        h, w = trg_face.shape[:2]\r\n\r\n        # 3d warp\r\n        warped_src_face = warp_image_3d(\r\n            src_face, src_landmarks[: self.end], trg_landmarks[: self.end], (h, w)\r\n        )\r\n\r\n        # Mask for blending\r\n        mask = mask_from_points((h, w), trg_landmarks)\r\n        mask_src = np.mean(warped_src_face, 
axis=2) > 0\r\n        mask = np.asarray(mask * mask_src, dtype=np.uint8)\r\n\r\n        # Correct color\r\n        if self.correct_color:\r\n            warped_src_face = apply_mask(warped_src_face, mask)\r\n            dst_face_masked = apply_mask(trg_face, mask)\r\n            warped_src_face = correct_colours(\r\n                dst_face_masked, warped_src_face, trg_landmarks\r\n            )\r\n\r\n        # 2d warp\r\n        if self.warp_2d:\r\n            unwarped_src_face = warp_image_3d(\r\n                warped_src_face,\r\n                trg_landmarks[: self.end],\r\n                src_landmarks[: self.end],\r\n                src_face.shape[:2],\r\n            )\r\n            warped_src_face = warp_image_2d(\r\n                unwarped_src_face,\r\n                transformation_from_points(trg_landmarks, src_landmarks),\r\n                (h, w, 3),\r\n            )\r\n\r\n            mask = mask_from_points((h, w), trg_landmarks)\r\n            mask_src = np.mean(warped_src_face, axis=2) > 0\r\n            mask = np.asarray(mask * mask_src, dtype=np.uint8)\r\n\r\n        # perform base blending operation\r\n        faceswap_output_cv2 = self._perform_base_blending(\r\n            mask, trg_face, warped_src_face\r\n        )\r\n\r\n        x, y, w, h = trg_shape\r\n        target_faceswap_img = target_image_cv2.copy()\r\n        target_faceswap_img[y : y + h, x : x + w] = faceswap_output_cv2\r\n\r\n        faceswap_output_image = Image.fromarray(\r\n            cv2.cvtColor(target_faceswap_img, cv2.COLOR_BGR2RGB)\r\n        )\r\n\r\n        if save_path:\r\n            faceswap_output_image.save(save_path, compress_level=0)\r\n\r\n        return faceswap_output_image\r\n\r\n    def _face_and_landmark_detection(self, image):\r\n        \"\"\"perform face detection and get facial landmarks\"\"\"\r\n        # get face bounding box\r\n        faces = self.detector(image)\r\n        idx = np.argmax(\r\n            [\r\n                (face.right() - face.left()) * (face.bottom() - face.top())\r\n                for face in faces\r\n            ]\r\n        )\r\n        bbox = faces[idx]\r\n\r\n        # predict landmarks\r\n        landmarks_dlib = self.predictor(image=image, box=bbox)\r\n        face_landmarks = np.array([[p.x, p.y] for p in landmarks_dlib.parts()])\r\n\r\n        return face_landmarks\r\n\r\n    def _process_face(self, image, r=10):\r\n        \"\"\"process detected face and landmarks\"\"\"\r\n        # get landmarks\r\n        landmarks = self._face_and_landmark_detection(image)\r\n\r\n        # get image dimensions\r\n        im_w, im_h = image.shape[:2]\r\n\r\n        # get face edges\r\n        left, top = np.min(landmarks, 0)\r\n        right, bottom = np.max(landmarks, 0)\r\n\r\n        # scale landmarks and face edges\r\n        x, y = max(0, left - r), max(0, top - r)\r\n        w, h = min(right + r, im_h) - x, min(bottom + r, im_w) - y\r\n\r\n        return (\r\n            landmarks - np.asarray([[x, y]]),\r\n            (x, y, w, h),\r\n            image[y : y + h, x : x + w],\r\n        )\r\n\r\n    @staticmethod\r\n    def _perform_base_blending(mask, trg_face, warped_src_face):\r\n        \"\"\"perform Poisson blending using mask\"\"\"\r\n\r\n        # Shrink the mask\r\n        kernel = np.ones((10, 10), np.uint8)\r\n        mask = cv2.erode(mask, kernel, iterations=1)\r\n\r\n        # Poisson Blending\r\n        r = cv2.boundingRect(mask)\r\n        center = (r[0] + int(r[2] / 2), r[1] + int(r[3] / 2))\r\n\r\n        output_cv2 = 
cv2.seamlessClone(\r\n            warped_src_face, trg_face, mask, center, cv2.NORMAL_CLONE\r\n        )\r\n        return output_cv2\r\n\r\n    @classmethod\r\n    def from_config(cls, config: Dict[str, Any]) -> \"Swap\":\r\n        \"\"\"\r\n        Instantiates a Swap from a configuration.\r\n        Args:\r\n            config: A configuration for a Swap.\r\n        Returns:\r\n            A Swap instance.\r\n        \"\"\"\r\n        # get config\r\n        swap_config = config.get(\"swap\")\r\n\r\n        # return instance\r\n        return cls(\r\n            predictor_path=swap_config.get(\"predictor_path\", CACHED_PREDICTOR_PATH),\r\n            warp_2d=swap_config.get(\"warp_2d\", True),\r\n            correct_color=swap_config.get(\"correct_color\", True),\r\n            end=swap_config.get(\"end\", 48),\r\n        )\r\n"
  },
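
A minimal sketch of using the stand-alone `Swap` class without the DOT wrapper; the image paths are placeholders and the dlib checkpoint must already exist at the `CACHED_PREDICTOR_PATH` location.

```python
from dot.faceswap_cv2.swap import Swap

swapper = Swap(
    predictor_path="saved_models/faceswap_cv/shape_predictor_68_face_landmarks.dat",
    warp_2d=False,       # same settings FaceswapCVOption.create_model uses
    correct_color=True,
    end=68,
)
result = swapper.apply_face_swap(
    source_image="source_face.jpg",   # placeholder paths
    target_image="target_face.jpg",
    save_path="swapped.png",
)
print(result.size)  # PIL.Image of the target with the blended source face
```
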
  {
    "path": "src/dot/fomm/__init__.py",
    "content": "#!/usr/bin/env python3\n\nfrom .option import FOMMOption\n\n__all__ = [\"FOMMOption\"]\n"
  },
  {
    "path": "src/dot/fomm/config/vox-adv-256.yaml",
    "content": "---\ndataset_params:\n    root_dir: data/vox-png\n    frame_shape: [256, 256, 3]\n    id_sampling: true\n    pairs_list: data/vox256.csv\n    augmentation_params:\n        flip_param:\n            horizontal_flip: true\n            time_flip: true\n        jitter_param:\n            brightness: 0.1\n            contrast: 0.1\n            saturation: 0.1\n            hue: 0.1\n\n\nmodel_params:\n    common_params:\n        num_kp: 10\n        num_channels: 3\n        estimate_jacobian: true\n    kp_detector_params:\n        temperature: 0.1\n        block_expansion: 32\n        max_features: 1024\n        scale_factor: 0.25\n        num_blocks: 5\n    generator_params:\n        block_expansion: 64\n        max_features: 512\n        num_down_blocks: 2\n        num_bottleneck_blocks: 6\n        estimate_occlusion_map: true\n        dense_motion_params:\n            block_expansion: 64\n            max_features: 1024\n            num_blocks: 5\n            scale_factor: 0.25\n    discriminator_params:\n        scales: [1]\n        block_expansion: 32\n        max_features: 512\n        num_blocks: 4\n        use_kp: true\n\n\ntrain_params:\n    num_epochs: 150\n    num_repeats: 75\n    epoch_milestones: []\n    lr_generator: 2.0e-4\n    lr_discriminator: 2.0e-4\n    lr_kp_detector: 2.0e-4\n    batch_size: 36\n    scales: [1, 0.5, 0.25, 0.125]\n    checkpoint_freq: 50\n    transform_params:\n        sigma_affine: 0.05\n        sigma_tps: 0.005\n        points_tps: 5\n    loss_weights:\n        generator_gan: 1\n        discriminator_gan: 1\n        feature_matching: [10, 10, 10, 10]\n        perceptual: [10, 10, 10, 10, 10]\n        equivariance_value: 10\n        equivariance_jacobian: 10\n\nreconstruction_params:\n    num_videos: 1000\n    format: .mp4\n\nanimate_params:\n    num_pairs: 50\n    format: .mp4\n    normalization_params:\n        adapt_movement_scale: false\n        use_relative_movement: true\n        use_relative_jacobian: true\n\nvisualizer_params:\n    kp_size: 5\n    draw_border: true\n    colormap: gist_rainbow\n"
  },
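
The YAML above is the FOMM model/training configuration; a loader along these lines (path assumed relative to the repo root) shows how the nested sections are addressed.

```python
import yaml

with open("src/dot/fomm/config/vox-adv-256.yaml") as f:
    config = yaml.safe_load(f)

# generator and keypoint-detector hyper-parameters live under model_params
print(config["model_params"]["common_params"]["num_kp"])               # 10
print(config["model_params"]["generator_params"]["block_expansion"])   # 64
print(config["model_params"]["kp_detector_params"]["scale_factor"])    # 0.25
```
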
  {
    "path": "src/dot/fomm/face_alignment.py",
    "content": "import warnings\nfrom enum import IntEnum\n\nimport numpy as np\nimport torch\nfrom face_alignment.folder_data import FolderData\nfrom face_alignment.utils import crop, draw_gaussian, flip, get_image, get_preds_fromhm\nfrom packaging import version\nfrom tqdm import tqdm\n\n\nclass LandmarksType(IntEnum):\n    \"\"\"Enum class defining the type of landmarks to detect.\n\n    ``TWO_D`` - the detected points ``(x,y)`` are detected in a 2D space and follow the visible contour of the face\n    ``TWO_HALF_D`` - this points represent the projection of the 3D points into 3D\n    ``THREE_D`` - detect the points ``(x,y,z)``` in a 3D space\n\n    \"\"\"\n\n    TWO_D = 1\n    TWO_HALF_D = 2\n    THREE_D = 3\n\n\nclass NetworkSize(IntEnum):\n    # TINY = 1\n    # SMALL = 2\n    # MEDIUM = 3\n    LARGE = 4\n\n\ndefault_model_urls = {\n    \"2DFAN-4\": \"saved_models/face_alignment/2DFAN4-cd938726ad.zip\",\n    \"3DFAN-4\": \"saved_models/face_alignment/3DFAN4-4a694010b9.zip\",\n    \"depth\": \"saved_models/face_alignment/depth-6c4283c0e0.zip\",\n}\n\nmodels_urls = {\n    \"1.6\": {\n        \"2DFAN-4\": \"saved_models/face_alignment/2DFAN4_1.6-c827573f02.zip\",\n        \"3DFAN-4\": \"saved_models/face_alignment/3DFAN4_1.6-ec5cf40a1d.zip\",\n        \"depth\": \"saved_models/face_alignment/depth_1.6-2aa3f18772.zip\",\n    },\n    \"1.5\": {\n        \"2DFAN-4\": \"saved_models/face_alignment/2DFAN4_1.5-a60332318a.zip\",\n        \"3DFAN-4\": \"saved_models/face_alignment/3DFAN4_1.5-176570af4d.zip\",\n        \"depth\": \"saved_models/face_alignment/depth_1.5-bc10f98e39.zip\",\n    },\n}\n\n\nclass FaceAlignment:\n    def __init__(\n        self,\n        landmarks_type,\n        network_size=NetworkSize.LARGE,\n        device=\"cuda\",\n        dtype=torch.float32,\n        flip_input=False,\n        face_detector=\"sfd\",\n        face_detector_kwargs=None,\n        verbose=False,\n    ):\n        self.device = device\n        self.flip_input = flip_input\n        self.landmarks_type = landmarks_type\n        self.verbose = verbose\n        self.dtype = dtype\n\n        if version.parse(torch.__version__) < version.parse(\"1.5.0\"):\n            raise ImportError(\n                \"Unsupported pytorch version detected. 
Minimum supported version of pytorch: 1.5.0\\\n                            Either upgrade (recommended) your pytorch setup, or downgrade to face-alignment 1.2.0\"\n            )\n\n        network_size = int(network_size)\n        pytorch_version = torch.__version__\n        if \"dev\" in pytorch_version:\n            pytorch_version = pytorch_version.rsplit(\".\", 2)[0]\n        else:\n            pytorch_version = pytorch_version.rsplit(\".\", 1)[0]\n\n        if \"cuda\" in device:\n            torch.backends.cudnn.benchmark = True\n\n        # Get the face detector\n        face_detector_module = __import__(\n            \"face_alignment.detection.\" + face_detector,\n            globals(),\n            locals(),\n            [face_detector],\n            0,\n        )\n        face_detector_kwargs = face_detector_kwargs or {}\n        self.face_detector = face_detector_module.FaceDetector(\n            device=device, verbose=verbose, **face_detector_kwargs\n        )\n\n        # Initialise the face alignemnt networks\n        if landmarks_type == LandmarksType.TWO_D:\n            network_name = \"2DFAN-\" + str(network_size)\n        else:\n            network_name = \"3DFAN-\" + str(network_size)\n        self.face_alignment_net = torch.jit.load(\n            models_urls.get(pytorch_version, default_model_urls)[network_name]\n        )\n\n        self.face_alignment_net.to(device, dtype=dtype)\n        self.face_alignment_net.eval()\n\n        # Initialiase the depth prediciton network\n        if landmarks_type == LandmarksType.THREE_D:\n            self.depth_prediciton_net = torch.jit.load(\n                models_urls.get(pytorch_version, default_model_urls)[\"depth\"]\n            )\n\n            self.depth_prediciton_net.to(device, dtype=dtype)\n            self.depth_prediciton_net.eval()\n\n    def get_landmarks(\n        self,\n        image_or_path,\n        detected_faces=None,\n        return_bboxes=False,\n        return_landmark_score=False,\n    ):\n        \"\"\"Deprecated, please use get_landmarks_from_image\n\n        Arguments:\n            image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it\n\n        Keyword Arguments:\n            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found\n            in the image (default: {None})\n            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.\n            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.\n        \"\"\"\n        return self.get_landmarks_from_image(\n            image_or_path, detected_faces, return_bboxes, return_landmark_score\n        )\n\n    @torch.no_grad()\n    def get_landmarks_from_image(\n        self,\n        image_or_path,\n        detected_faces=None,\n        return_bboxes=False,\n        return_landmark_score=False,\n    ):\n        \"\"\"Predict the landmarks for each face present in the image.\n\n        This function predicts a set of 68 2D or 3D images, one for each image present.\n        If detect_faces is None the method will also run a face detector.\n\n         Arguments:\n            image_or_path {string or numpy.array or torch.tensor} -- The input image or path to it.\n\n        Keyword Arguments:\n            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found\n            in the image (default: {None})\n            return_bboxes {boolean} -- If True, return the face 
bounding boxes in addition to the keypoints.\n            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.\n\n        Return:\n            result:\n                1. if both return_bboxes and return_landmark_score are False, result will be:\n                    landmark\n                2. Otherwise, result will be one of the following, depending on the actual value of return_* arguments.\n                    (landmark, landmark_score, detected_face)\n                    (landmark, None,           detected_face)\n                    (landmark, landmark_score, None         )\n        \"\"\"\n        image = get_image(image_or_path)  # noqa\n\n        if detected_faces is None:\n            detected_faces = self.face_detector.detect_from_image(image.copy())\n\n        if len(detected_faces) == 0:\n            warnings.warn(\"No faces were detected.\")\n            if return_bboxes or return_landmark_score:\n                return None, None, None\n            else:\n                return None\n\n        landmarks = []\n        landmarks_scores = []\n        for i, d in enumerate(detected_faces):\n            center = torch.tensor(\n                [d[2] - (d[2] - d[0]) / 2.0, d[3] - (d[3] - d[1]) / 2.0]\n            )\n            center[1] = center[1] - (d[3] - d[1]) * 0.12\n            scale = (d[2] - d[0] + d[3] - d[1]) / self.face_detector.reference_scale\n\n            inp = crop(image, center, scale)  # noqa\n            inp = torch.from_numpy(inp.transpose((2, 0, 1))).float()\n\n            inp = inp.to(self.device, dtype=self.dtype)\n            inp.div_(255.0).unsqueeze_(0)\n\n            out = self.face_alignment_net(inp).detach()\n            if self.flip_input:\n                out += flip(\n                    self.face_alignment_net(flip(inp)).detach(), is_label=True\n                )  # noqa\n            out = out.to(device=\"cpu\", dtype=torch.float32).numpy()\n\n            pts, pts_img, scores = get_preds_fromhm(out, center.numpy(), scale)  # noqa\n            pts, pts_img = torch.from_numpy(pts), torch.from_numpy(pts_img)\n            pts, pts_img = pts.view(68, 2) * 4, pts_img.view(68, 2)\n            scores = scores.squeeze(0)\n\n            if self.landmarks_type == LandmarksType.THREE_D:\n                heatmaps = np.zeros((68, 256, 256), dtype=np.float32)\n                for i in range(68):\n                    if pts[i, 0] > 0 and pts[i, 1] > 0:\n                        heatmaps[i] = draw_gaussian(heatmaps[i], pts[i], 2)  # noqa\n                heatmaps = torch.from_numpy(heatmaps).unsqueeze_(0)\n\n                heatmaps = heatmaps.to(self.device, dtype=self.dtype)\n                depth_pred = (\n                    self.depth_prediciton_net(torch.cat((inp, heatmaps), 1))\n                    .data.cpu()\n                    .view(68, 1)\n                    .to(dtype=torch.float32)\n                )\n                pts_img = torch.cat(\n                    (pts_img, depth_pred * (1.0 / (256.0 / (200.0 * scale)))), 1\n                )\n\n            landmarks.append(pts_img.numpy())\n            landmarks_scores.append(scores)\n\n        if not return_bboxes:\n            detected_faces = None\n        if not return_landmark_score:\n            landmarks_scores = None\n        if return_bboxes or return_landmark_score:\n            return landmarks, landmarks_scores, detected_faces\n        else:\n            return landmarks\n\n    @torch.no_grad()\n    def get_landmarks_from_batch(\n        self,\n        
image_batch,\n        detected_faces=None,\n        return_bboxes=False,\n        return_landmark_score=False,\n    ):\n        \"\"\"Predict the landmarks for each face present in the image.\n\n        This function predicts a set of 68 2D or 3D images, one for each image in a batch in parallel.\n        If detect_faces is None the method will also run a face detector.\n\n         Arguments:\n            image_batch {torch.tensor} -- The input images batch\n\n        Keyword Arguments:\n            detected_faces {list of numpy.array} -- list of bounding boxes, one for each face found\n            in the image (default: {None})\n            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.\n            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.\n\n        Return:\n            result:\n                1. if both return_bboxes and return_landmark_score are False, result will be:\n                    landmarks\n                2. Otherwise, result will be one of the following, depending on the actual value of return_* arguments.\n                    (landmark, landmark_score, detected_face)\n                    (landmark, None,           detected_face)\n                    (landmark, landmark_score, None         )\n        \"\"\"\n\n        if detected_faces is None:\n            detected_faces = self.face_detector.detect_from_batch(image_batch)\n\n        if len(detected_faces) == 0:\n            warnings.warn(\"No faces were detected.\")\n            if return_bboxes or return_landmark_score:\n                return None, None, None\n            else:\n                return None\n\n        landmarks = []\n        landmarks_scores_list = []\n        # A batch for each frame\n        for i, faces in enumerate(detected_faces):\n            res = self.get_landmarks_from_image(\n                image_batch[i].cpu().numpy().transpose(1, 2, 0),\n                detected_faces=faces,\n                return_landmark_score=return_landmark_score,\n            )\n            if return_landmark_score:\n                landmark_set, landmarks_scores, _ = res\n                landmarks_scores_list.append(landmarks_scores)\n            else:\n                landmark_set = res\n            # Bacward compatibility\n            if landmark_set is not None:\n                landmark_set = np.concatenate(landmark_set, axis=0)\n            else:\n                landmark_set = []\n            landmarks.append(landmark_set)\n\n        if not return_bboxes:\n            detected_faces = None\n        if not return_landmark_score:\n            landmarks_scores_list = None\n        if return_bboxes or return_landmark_score:\n            return landmarks, landmarks_scores_list, detected_faces\n        else:\n            return landmarks\n\n    def get_landmarks_from_directory(\n        self,\n        path,\n        extensions=[\".jpg\", \".png\"],\n        recursive=True,\n        show_progress_bar=True,\n        return_bboxes=False,\n        return_landmark_score=False,\n    ):\n        \"\"\"Scan a directory for images with a given extension type(s) and predict the landmarks for each\n            face present in the images found.\n\n         Arguments:\n            path {str} -- path to the target directory containing the images\n\n        Keyword Arguments:\n            extensions {list of str} -- list containing the image extensions considered (default: ['.jpg', '.png'])\n            recursive {boolean} -- If 
True, scans for images recursively (default: True)\n            show_progress_bar {boolean} -- If True displays a progress bar (default: True)\n            return_bboxes {boolean} -- If True, return the face bounding boxes in addition to the keypoints.\n            return_landmark_score {boolean} -- If True, return the keypoint scores along with the keypoints.\n        \"\"\"\n        dataset = FolderData(\n            path,\n            self.face_detector.tensor_or_path_to_ndarray,\n            extensions,\n            recursive,\n            self.verbose,\n        )\n        dataloader = torch.utils.data.DataLoader(\n            dataset, batch_size=1, shuffle=False, num_workers=2, prefetch_factor=4\n        )\n\n        predictions = {}\n        for (image_path, image) in tqdm(dataloader, disable=not show_progress_bar):\n            image_path, image = image_path[0], image[0]\n            bounding_boxes = self.face_detector.detect_from_image(image)\n            if return_bboxes or return_landmark_score:\n                preds, bbox, score = self.get_landmarks_from_image(\n                    image,\n                    bounding_boxes,\n                    return_bboxes=return_bboxes,\n                    return_landmark_score=return_landmark_score,\n                )\n                predictions[image_path] = (preds, bbox, score)\n            else:\n                preds = self.get_landmarks_from_image(image, bounding_boxes)\n                predictions[image_path] = preds\n\n        return predictions\n"
  },
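
A sketch of the bundled `FaceAlignment` wrapper; it loads the TorchScript checkpoints from the local `saved_models/face_alignment/` paths listed above, so those files must already be in place, and the image path is a placeholder.

```python
import cv2

from dot.fomm.face_alignment import FaceAlignment, LandmarksType

fa = FaceAlignment(LandmarksType.TWO_D, device="cpu", flip_input=False)

image = cv2.cvtColor(cv2.imread("face.jpg"), cv2.COLOR_BGR2RGB)  # placeholder image
landmarks = fa.get_landmarks_from_image(image)
if landmarks is None:
    print("no face detected")
else:
    print(landmarks[0].shape)  # (68, 2) landmark array for the first detected face
```
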
  {
    "path": "src/dot/fomm/modules/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/fomm/modules/dense_motion.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom .util import AntiAliasInterpolation2d, Hourglass, kp2gaussian, make_coordinate_grid\n\n\nclass DenseMotionNetwork(nn.Module):\n    \"\"\"\n    Module that predicting a dense motion\n    from sparse motion representation given\n    by kp_source and kp_driving\n    \"\"\"\n\n    def __init__(\n        self,\n        block_expansion,\n        num_blocks,\n        max_features,\n        num_kp,\n        num_channels,\n        estimate_occlusion_map=False,\n        scale_factor=1,\n        kp_variance=0.01,\n    ):\n\n        super(DenseMotionNetwork, self).__init__()\n        self.hourglass = Hourglass(\n            block_expansion=block_expansion,\n            in_features=(num_kp + 1) * (num_channels + 1),\n            max_features=max_features,\n            num_blocks=num_blocks,\n        )\n\n        self.mask = nn.Conv2d(\n            self.hourglass.out_filters, num_kp + 1, kernel_size=(7, 7), padding=(3, 3)\n        )\n\n        if estimate_occlusion_map:\n            self.occlusion = nn.Conv2d(\n                self.hourglass.out_filters, 1, kernel_size=(7, 7), padding=(3, 3)\n            )\n        else:\n            self.occlusion = None\n\n        self.num_kp = num_kp\n        self.scale_factor = scale_factor\n        self.kp_variance = kp_variance\n\n        if self.scale_factor != 1:\n            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)\n\n    def create_heatmap_representations(self, source_image, kp_driving, kp_source):\n        \"\"\"\n        Eq 6. in the paper H_k(z)\n        \"\"\"\n        spatial_size = source_image.shape[2:]\n        gaussian_driving = kp2gaussian(\n            kp_driving, spatial_size=spatial_size, kp_variance=self.kp_variance\n        )\n\n        gaussian_source = kp2gaussian(\n            kp_source, spatial_size=spatial_size, kp_variance=self.kp_variance\n        )\n        heatmap = gaussian_driving - gaussian_source\n\n        # adding background feature\n        zeros = torch.zeros(heatmap.shape[0], 1, spatial_size[0], spatial_size[1]).type(\n            heatmap.type()\n        )\n        heatmap = torch.cat([zeros, heatmap], dim=1)\n        heatmap = heatmap.unsqueeze(2)\n        return heatmap\n\n    def create_sparse_motions(self, source_image, kp_driving, kp_source):\n        \"\"\"\n        Eq 4. 
in the paper T_{s<-d}(z)\n        \"\"\"\n        bs, _, h, w = source_image.shape\n        identity_grid = make_coordinate_grid((h, w), type=kp_source[\"value\"].type())\n        identity_grid = identity_grid.view(1, 1, h, w, 2)\n        coordinate_grid = identity_grid - kp_driving[\"value\"].view(\n            bs, self.num_kp, 1, 1, 2\n        )\n        if \"jacobian\" in kp_driving:\n            jacobian = torch.matmul(\n                kp_source[\"jacobian\"], torch.inverse(kp_driving[\"jacobian\"])\n            )\n            jacobian = jacobian.unsqueeze(-3).unsqueeze(-3)\n            jacobian = jacobian.repeat(1, 1, h, w, 1, 1)\n            coordinate_grid = torch.matmul(jacobian, coordinate_grid.unsqueeze(-1))\n            coordinate_grid = coordinate_grid.squeeze(-1)\n\n        driving_to_source = coordinate_grid + kp_source[\"value\"].view(\n            bs, self.num_kp, 1, 1, 2\n        )\n\n        # adding background feature\n        identity_grid = identity_grid.repeat(bs, 1, 1, 1, 1)\n        sparse_motions = torch.cat([identity_grid, driving_to_source], dim=1)\n        return sparse_motions\n\n    def create_deformed_source_image(self, source_image, sparse_motions):\n        \"\"\"\n        Eq 7. in the paper hat{T}_{s<-d}(z)\n        \"\"\"\n        bs, _, h, w = source_image.shape\n        source_repeat = (\n            source_image.unsqueeze(1)\n            .unsqueeze(1)\n            .repeat(1, self.num_kp + 1, 1, 1, 1, 1)\n        )\n        source_repeat = source_repeat.view(bs * (self.num_kp + 1), -1, h, w)\n        sparse_motions = sparse_motions.view((bs * (self.num_kp + 1), h, w, -1))\n        sparse_deformed = F.grid_sample(source_repeat, sparse_motions)\n        sparse_deformed = sparse_deformed.view((bs, self.num_kp + 1, -1, h, w))\n        return sparse_deformed\n\n    def forward(self, source_image, kp_driving, kp_source):\n        if self.scale_factor != 1:\n            source_image = self.down(source_image)\n\n        bs, _, h, w = source_image.shape\n\n        out_dict = dict()\n        heatmap_representation = self.create_heatmap_representations(\n            source_image, kp_driving, kp_source\n        )\n        sparse_motion = self.create_sparse_motions(source_image, kp_driving, kp_source)\n        deformed_source = self.create_deformed_source_image(source_image, sparse_motion)\n        out_dict[\"sparse_deformed\"] = deformed_source\n\n        input = torch.cat([heatmap_representation, deformed_source], dim=2)\n        input = input.view(bs, -1, h, w)\n\n        prediction = self.hourglass(input)\n\n        mask = self.mask(prediction)\n        mask = F.softmax(mask, dim=1)\n        out_dict[\"mask\"] = mask\n        mask = mask.unsqueeze(2)\n        sparse_motion = sparse_motion.permute(0, 1, 4, 2, 3)\n        deformation = (sparse_motion * mask).sum(dim=1)\n        deformation = deformation.permute(0, 2, 3, 1)\n\n        out_dict[\"deformation\"] = deformation\n\n        # Sec. 3.2 in the paper\n        if self.occlusion:\n            occlusion_map = torch.sigmoid(self.occlusion(prediction))\n            out_dict[\"occlusion_map\"] = occlusion_map\n\n        return out_dict\n"
  },
  {
    "path": "src/dot/fomm/modules/generator_optim.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom .dense_motion import DenseMotionNetwork\nfrom .util import DownBlock2d, ResBlock2d, SameBlock2d, UpBlock2d\n\n\nclass OcclusionAwareGenerator(nn.Module):\n    \"\"\"\n    Generator that given source image and keypoints\n    try to transform image according to movement trajectories\n    induced by keypoints. Generator follows Johnson architecture.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_channels,\n        num_kp,\n        block_expansion,\n        max_features,\n        num_down_blocks,\n        num_bottleneck_blocks,\n        estimate_occlusion_map=False,\n        dense_motion_params=None,\n        estimate_jacobian=False,\n    ):\n        super(OcclusionAwareGenerator, self).__init__()\n\n        if dense_motion_params is not None:\n            self.dense_motion_network = DenseMotionNetwork(\n                num_kp=num_kp,\n                num_channels=num_channels,\n                estimate_occlusion_map=estimate_occlusion_map,\n                **dense_motion_params\n            )\n        else:\n            self.dense_motion_network = None\n\n        self.first = SameBlock2d(\n            num_channels, block_expansion, kernel_size=(7, 7), padding=(3, 3)\n        )\n\n        down_blocks = []\n        for i in range(num_down_blocks):\n            in_features = min(max_features, block_expansion * (2**i))\n            out_features = min(max_features, block_expansion * (2 ** (i + 1)))\n            down_blocks.append(\n                DownBlock2d(\n                    in_features, out_features, kernel_size=(3, 3), padding=(1, 1)\n                )\n            )\n        self.down_blocks = nn.ModuleList(down_blocks)\n\n        up_blocks = []\n        for i in range(num_down_blocks):\n            in_features = min(\n                max_features, block_expansion * (2 ** (num_down_blocks - i))\n            )\n            out_features = min(\n                max_features, block_expansion * (2 ** (num_down_blocks - i - 1))\n            )\n            up_blocks.append(\n                UpBlock2d(in_features, out_features, kernel_size=(3, 3), padding=(1, 1))\n            )\n        self.up_blocks = nn.ModuleList(up_blocks)\n\n        self.bottleneck = torch.nn.Sequential()\n        in_features = min(max_features, block_expansion * (2**num_down_blocks))\n        for i in range(num_bottleneck_blocks):\n            self.bottleneck.add_module(\n                \"r\" + str(i),\n                ResBlock2d(in_features, kernel_size=(3, 3), padding=(1, 1)),\n            )\n\n        self.final = nn.Conv2d(\n            block_expansion, num_channels, kernel_size=(7, 7), padding=(3, 3)\n        )\n        self.estimate_occlusion_map = estimate_occlusion_map\n        self.num_channels = num_channels\n\n        self.enc_features = None\n\n    def deform_input(self, inp, deformation):\n        _, h_old, w_old, _ = deformation.shape\n        _, _, h, w = inp.shape\n        if h_old != h or w_old != w:\n            deformation = deformation.permute(0, 3, 1, 2)\n            deformation = F.interpolate(deformation, size=(h, w), mode=\"bilinear\")\n            deformation = deformation.permute(0, 2, 3, 1)\n        return F.grid_sample(inp, deformation)\n\n    def encode_source(self, source_image):\n        # Encoding (downsampling) part\n        out = self.first(source_image)\n        for i in range(len(self.down_blocks)):\n            out = self.down_blocks[i](out)\n\n        
self.enc_features = out\n\n    def forward(self, source_image, kp_driving, kp_source, optim_ret=True):\n        assert self.enc_features is not None, \"Call encode_source()\"\n        out = self.enc_features\n\n        # Transforming feature representation\n        # according to deformation and occlusion\n        output_dict = {}\n        if self.dense_motion_network is not None:\n            dense_motion = self.dense_motion_network(\n                source_image=source_image, kp_driving=kp_driving, kp_source=kp_source\n            )\n            output_dict[\"mask\"] = dense_motion[\"mask\"]\n            output_dict[\"sparse_deformed\"] = dense_motion[\"sparse_deformed\"]\n\n            if \"occlusion_map\" in dense_motion:\n                occlusion_map = dense_motion[\"occlusion_map\"]\n                output_dict[\"occlusion_map\"] = occlusion_map\n            else:\n                occlusion_map = None\n            deformation = dense_motion[\"deformation\"]\n            out = self.deform_input(out, deformation)\n\n            if occlusion_map is not None:\n                if (out.shape[2] != occlusion_map.shape[2]) or (\n                    out.shape[3] != occlusion_map.shape[3]\n                ):\n                    occlusion_map = F.interpolate(\n                        occlusion_map, size=out.shape[2:], mode=\"bilinear\"\n                    )\n                out = out * occlusion_map\n\n            if not optim_ret:\n                output_dict[\"deformed\"] = self.deform_input(source_image, deformation)\n\n        # Decoding part\n        out = self.bottleneck(out)\n        for i in range(len(self.up_blocks)):\n            out = self.up_blocks[i](out)\n        out = self.final(out)\n        out = F.sigmoid(out)\n\n        output_dict[\"prediction\"] = out\n\n        return output_dict\n"
  },
  {
    "path": "src/dot/fomm/modules/keypoint_detector.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom .util import AntiAliasInterpolation2d, Hourglass, make_coordinate_grid\n\n\nclass KPDetector(nn.Module):\n    \"\"\"\n    Detecting a keypoints. Return keypoint position\n    and jacobian near each keypoint.\n    \"\"\"\n\n    def __init__(\n        self,\n        block_expansion,\n        num_kp,\n        num_channels,\n        max_features,\n        num_blocks,\n        temperature,\n        estimate_jacobian=False,\n        scale_factor=1,\n        single_jacobian_map=False,\n        pad=0,\n    ):\n\n        super(KPDetector, self).__init__()\n\n        self.predictor = Hourglass(\n            block_expansion,\n            in_features=num_channels,\n            max_features=max_features,\n            num_blocks=num_blocks,\n        )\n\n        self.kp = nn.Conv2d(\n            in_channels=self.predictor.out_filters,\n            out_channels=num_kp,\n            kernel_size=(7, 7),\n            padding=pad,\n        )\n\n        if estimate_jacobian:\n            self.num_jacobian_maps = 1 if single_jacobian_map else num_kp\n            self.jacobian = nn.Conv2d(\n                in_channels=self.predictor.out_filters,\n                out_channels=4 * self.num_jacobian_maps,\n                kernel_size=(7, 7),\n                padding=pad,\n            )\n            self.jacobian.weight.data.zero_()\n            self.jacobian.bias.data.copy_(\n                torch.tensor([1, 0, 0, 1] * self.num_jacobian_maps, dtype=torch.float)\n            )\n        else:\n            self.jacobian = None\n\n        self.temperature = temperature\n        self.scale_factor = scale_factor\n        if self.scale_factor != 1:\n            self.down = AntiAliasInterpolation2d(num_channels, self.scale_factor)\n\n    def gaussian2kp(self, heatmap):\n        \"\"\"\n        Extract the mean and from a heatmap\n        \"\"\"\n        shape = heatmap.shape\n        heatmap = heatmap.unsqueeze(-1)\n        grid = (\n            make_coordinate_grid(shape[2:], heatmap.type()).unsqueeze_(0).unsqueeze_(0)\n        )\n        value = (heatmap * grid).sum(dim=(2, 3))\n        kp = {\"value\": value}\n\n        return kp\n\n    def forward(self, x):\n        if self.scale_factor != 1:\n            x = self.down(x)\n\n        feature_map = self.predictor(x)\n        prediction = self.kp(feature_map)\n\n        final_shape = prediction.shape\n        heatmap = prediction.view(final_shape[0], final_shape[1], -1)\n        heatmap = F.softmax(heatmap / self.temperature, dim=2)\n        heatmap = heatmap.view(*final_shape)\n\n        out = self.gaussian2kp(heatmap)\n\n        if self.jacobian is not None:\n            jacobian_map = self.jacobian(feature_map)\n            jacobian_map = jacobian_map.reshape(\n                final_shape[0],\n                self.num_jacobian_maps,\n                4,\n                final_shape[2],\n                final_shape[3],\n            )\n            heatmap = heatmap.unsqueeze(2)\n\n            jacobian = heatmap * jacobian_map\n            jacobian = jacobian.view(final_shape[0], final_shape[1], 4, -1)\n            jacobian = jacobian.sum(dim=-1)\n            jacobian = jacobian.view(jacobian.shape[0], jacobian.shape[1], 2, 2)\n            out[\"jacobian\"] = jacobian\n\n        return out\n"
  },
  {
    "path": "src/dot/fomm/modules/util.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom ..sync_batchnorm.batchnorm import SynchronizedBatchNorm2d as BatchNorm2d\n\n\ndef kp2gaussian(kp, spatial_size, kp_variance):\n    \"\"\"\n    Transform a keypoint into gaussian like representation\n    \"\"\"\n    mean = kp[\"value\"]\n\n    coordinate_grid = make_coordinate_grid(spatial_size, mean.type())\n    number_of_leading_dimensions = len(mean.shape) - 1\n    shape = (1,) * number_of_leading_dimensions + coordinate_grid.shape\n    coordinate_grid = coordinate_grid.view(*shape)\n    repeats = mean.shape[:number_of_leading_dimensions] + (1, 1, 1)\n    coordinate_grid = coordinate_grid.repeat(*repeats)\n\n    # Preprocess kp shape\n    shape = mean.shape[:number_of_leading_dimensions] + (1, 1, 2)\n    mean = mean.view(*shape)\n\n    mean_sub = coordinate_grid - mean\n\n    out = torch.exp(-0.5 * (mean_sub**2).sum(-1) / kp_variance)\n\n    return out\n\n\ndef make_coordinate_grid(spatial_size, type):\n    \"\"\"\n    Create a meshgrid [-1,1] x [-1,1] of given spatial_size.\n    \"\"\"\n    h, w = spatial_size\n    x = torch.arange(w).type(type)\n    y = torch.arange(h).type(type)\n\n    x = 2 * (x / (w - 1)) - 1\n    y = 2 * (y / (h - 1)) - 1\n\n    yy = y.view(-1, 1).repeat(1, w)\n    xx = x.view(1, -1).repeat(h, 1)\n\n    meshed = torch.cat([xx.unsqueeze_(2), yy.unsqueeze_(2)], 2)\n\n    return meshed\n\n\nclass ResBlock2d(nn.Module):\n    \"\"\"\n    Res block, preserve spatial resolution.\n    \"\"\"\n\n    def __init__(self, in_features, kernel_size, padding):\n        super(ResBlock2d, self).__init__()\n        self.conv1 = nn.Conv2d(\n            in_channels=in_features,\n            out_channels=in_features,\n            kernel_size=kernel_size,\n            padding=padding,\n        )\n\n        self.conv2 = nn.Conv2d(\n            in_channels=in_features,\n            out_channels=in_features,\n            kernel_size=kernel_size,\n            padding=padding,\n        )\n\n        self.norm1 = BatchNorm2d(in_features, affine=True)\n        self.norm2 = BatchNorm2d(in_features, affine=True)\n\n    def forward(self, x):\n        out = self.norm1(x)\n        out = F.relu(out)\n        out = self.conv1(out)\n        out = self.norm2(out)\n        out = F.relu(out)\n        out = self.conv2(out)\n        out += x\n        return out\n\n\nclass UpBlock2d(nn.Module):\n    \"\"\"\n    Upsampling block for use in decoder.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n\n        super(UpBlock2d, self).__init__()\n\n        self.conv = nn.Conv2d(\n            in_channels=in_features,\n            out_channels=out_features,\n            kernel_size=kernel_size,\n            padding=padding,\n            groups=groups,\n        )\n\n        self.norm = BatchNorm2d(out_features, affine=True)\n\n    def forward(self, x):\n        out = F.interpolate(x, scale_factor=2)\n        out = self.conv(out)\n        out = self.norm(out)\n        out = F.relu(out)\n        return out\n\n\nclass DownBlock2d(nn.Module):\n    \"\"\"\n    Downsampling block for use in encoder.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, kernel_size=3, padding=1, groups=1):\n\n        super(DownBlock2d, self).__init__()\n        self.conv = nn.Conv2d(\n            in_channels=in_features,\n            out_channels=out_features,\n            kernel_size=kernel_size,\n            padding=padding,\n            groups=groups,\n        
)\n\n        self.norm = BatchNorm2d(out_features, affine=True)\n        self.pool = nn.AvgPool2d(kernel_size=(2, 2))\n\n    def forward(self, x):\n        out = self.conv(x)\n        out = self.norm(out)\n        out = F.relu(out)\n        out = self.pool(out)\n        return out\n\n\nclass SameBlock2d(nn.Module):\n    \"\"\"\n    Simple block, preserve spatial resolution.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, groups=1, kernel_size=3, padding=1):\n\n        super(SameBlock2d, self).__init__()\n        self.conv = nn.Conv2d(\n            in_channels=in_features,\n            out_channels=out_features,\n            kernel_size=kernel_size,\n            padding=padding,\n            groups=groups,\n        )\n\n        self.norm = BatchNorm2d(out_features, affine=True)\n\n    def forward(self, x):\n        out = self.conv(x)\n        out = self.norm(out)\n        out = F.relu(out)\n        return out\n\n\nclass Encoder(nn.Module):\n    \"\"\"\n    Hourglass Encoder\n    \"\"\"\n\n    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\n\n        super(Encoder, self).__init__()\n\n        down_blocks = []\n        for i in range(num_blocks):\n            down_blocks.append(\n                DownBlock2d(\n                    in_features\n                    if i == 0\n                    else min(max_features, block_expansion * (2**i)),\n                    min(max_features, block_expansion * (2 ** (i + 1))),\n                    kernel_size=3,\n                    padding=1,\n                )\n            )\n\n        self.down_blocks = nn.ModuleList(down_blocks)\n\n    def forward(self, x):\n        outs = [x]\n        for down_block in self.down_blocks:\n            outs.append(down_block(outs[-1]))\n        return outs\n\n\nclass Decoder(nn.Module):\n    \"\"\"\n    Hourglass Decoder\n    \"\"\"\n\n    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\n\n        super(Decoder, self).__init__()\n\n        up_blocks = []\n\n        for i in range(num_blocks)[::-1]:\n            in_filters = (1 if i == num_blocks - 1 else 2) * min(\n                max_features, block_expansion * (2 ** (i + 1))\n            )\n            out_filters = min(max_features, block_expansion * (2**i))\n            up_blocks.append(\n                UpBlock2d(in_filters, out_filters, kernel_size=3, padding=1)\n            )\n\n        self.up_blocks = nn.ModuleList(up_blocks)\n        self.out_filters = block_expansion + in_features\n\n    def forward(self, x):\n        out = x.pop()\n        for up_block in self.up_blocks:\n            out = up_block(out)\n            skip = x.pop()\n            out = torch.cat([out, skip], dim=1)\n        return out\n\n\nclass Hourglass(nn.Module):\n    \"\"\"\n    Hourglass architecture.\n    \"\"\"\n\n    def __init__(self, block_expansion, in_features, num_blocks=3, max_features=256):\n\n        super(Hourglass, self).__init__()\n        self.encoder = Encoder(block_expansion, in_features, num_blocks, max_features)\n\n        self.decoder = Decoder(block_expansion, in_features, num_blocks, max_features)\n\n        self.out_filters = self.decoder.out_filters\n\n    def forward(self, x):\n        return self.decoder(self.encoder(x))\n\n\nclass AntiAliasInterpolation2d(nn.Module):\n    \"\"\"\n    Band-limited downsampling,\n    for better preservation of the input signal.\n    \"\"\"\n\n    def __init__(self, channels, scale):\n        super(AntiAliasInterpolation2d, self).__init__()\n       
 sigma = (1 / scale - 1) / 2\n        kernel_size = 2 * round(sigma * 4) + 1\n        self.ka = kernel_size // 2\n        self.kb = self.ka - 1 if kernel_size % 2 == 0 else self.ka\n\n        kernel_size = [kernel_size, kernel_size]\n        sigma = [sigma, sigma]\n        # The gaussian kernel is the product of the\n        # gaussian function of each dimension.\n        kernel = 1\n        meshgrids = torch.meshgrid(\n            [torch.arange(size, dtype=torch.float32) for size in kernel_size],\n            indexing=\"xy\",\n        )\n        for size, std, mgrid in zip(kernel_size, sigma, meshgrids):\n            mean = (size - 1) / 2\n            kernel *= torch.exp(-((mgrid - mean) ** 2) / (2 * std**2))\n\n        # Make sure sum of values in gaussian kernel equals 1.\n        kernel = kernel / torch.sum(kernel)\n        # Reshape to depthwise convolutional weight\n        kernel = kernel.view(1, 1, *kernel.size())\n        kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))\n\n        self.register_buffer(\"weight\", kernel)\n        self.groups = channels\n        self.scale = scale\n\n    def forward(self, input):\n        if self.scale == 1.0:\n            return input\n\n        out = F.pad(input, (self.ka, self.kb, self.ka, self.kb))\n        out = F.conv2d(out, weight=self.weight, groups=self.groups)\n        out = F.interpolate(out, scale_factor=(self.scale, self.scale))\n\n        return out\n"
  },
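  {
    "path": "examples/fomm_util_example.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"Hypothetical sketch for two helpers in dot.fomm.modules.util.\n\nShows band-limited downsampling with AntiAliasInterpolation2d and keypoint-to-heatmap\nconversion with kp2gaussian; the file name and tensor sizes are illustrative only.\"\"\"\n\nimport torch\n\nfrom dot.fomm.modules.util import AntiAliasInterpolation2d, kp2gaussian\n\n# Band-limited 2x downsampling of a 3-channel image batch (Gaussian blur + resample).\ndown = AntiAliasInterpolation2d(channels=3, scale=0.5)\nimages = torch.randn(1, 3, 256, 256)\nprint(down(images).shape)  # torch.Size([1, 3, 128, 128])\n\n# Gaussian heatmaps centered on 10 keypoints over a 64x64 grid,\n# the same representation DenseMotionNetwork builds internally.\nkp = {\"value\": torch.rand(1, 10, 2) * 2 - 1}  # keypoint coordinates in [-1, 1]\nheatmaps = kp2gaussian(kp, spatial_size=(64, 64), kp_variance=0.01)\nprint(heatmaps.shape)  # torch.Size([1, 10, 64, 64])\n"
  },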
  {
    "path": "src/dot/fomm/option.py",
    "content": "#!/usr/bin/env python3\n\nimport os\nimport sys\n\nimport cv2\nimport numpy as np\n\nfrom ..commons import ModelOption\nfrom ..commons.cam.cam import (\n    draw_calib_text,\n    draw_face_landmarks,\n    draw_landmark_text,\n    draw_rect,\n    is_new_frame_better,\n)\nfrom ..commons.utils import crop, log, pad_img, resize\nfrom .predictor_local import PredictorLocal\n\n\ndef determine_path():\n    \"\"\"\n    Find the script path\n    \"\"\"\n    try:\n        root = __file__\n        if os.path.islink(root):\n            root = os.path.realpath(root)\n\n        return os.path.dirname(os.path.abspath(root))\n    except Exception as e:\n        print(e)\n        print(\"I'm sorry, but something is wrong.\")\n        print(\"There is no __file__ variable. Please contact the author.\")\n        sys.exit()\n\n\nclass FOMMOption(ModelOption):\n    def __init__(\n        self,\n        use_gpu: bool = True,\n        use_mask: bool = False,\n        crop_size: int = 256,\n        gpen_type: str = None,\n        gpen_path: str = None,\n        offline: bool = False,\n    ):\n        super(FOMMOption, self).__init__(\n            gpen_type=gpen_type,\n            use_gpu=use_gpu,\n            crop_size=crop_size,\n            gpen_path=gpen_path,\n        )\n        # use FOMM offline, video or image file\n        self.offline = offline\n        self.frame_proportion = 0.9\n        self.frame_offset_x = 0\n        self.frame_offset_y = 0\n\n        self.overlay_alpha = 0.0\n        self.preview_flip = False\n        self.output_flip = False\n        self.find_keyframe = False\n        self.is_calibrated = True if self.offline else False\n\n        self.show_landmarks = False\n        self.passthrough = False\n        self.green_overlay = False\n        self.opt_relative = True\n        self.opt_adapt_scale = True\n        self.opt_enc_downscale = 1\n        self.opt_no_pad = True\n        self.opt_in_port = 5557\n        self.opt_out_port = 5558\n        self.opt_hide_rect = False\n        self.opt_in_addr = None\n        self.opt_out_addr = None\n        self.LANDMARK_SLICE_ARRAY = np.array([17, 22, 27, 31, 36, 42, 48, 60])\n        self.display_string = \"\"\n\n    def create_model(self, model_path, **kwargs) -> None:  # type: ignore\n        opt_config = determine_path() + \"/config/vox-adv-256.yaml\"\n        opt_checkpoint = model_path\n\n        predictor_args = {\n            \"config_path\": opt_config,\n            \"checkpoint_path\": opt_checkpoint,\n            \"relative\": self.opt_relative,\n            \"adapt_movement_scale\": self.opt_adapt_scale,\n            \"enc_downscale\": self.opt_enc_downscale,\n        }\n\n        self.predictor = PredictorLocal(**predictor_args)\n\n    def change_option(self, image, **kwargs):\n        if image.ndim == 2:\n            image = np.tile(image[..., None], [1, 1, 3])\n        image = image[..., :3][..., ::-1]\n        image = resize(image, (self.crop_size, self.crop_size))\n        print(\"Image shape \", image.shape)\n        self.source_kp = self.predictor.get_frame_kp(image)\n        self.kp_source = None\n        self.predictor.set_source_image(image)\n        self.source_image = image\n\n    def handle_keyboard_input(self):\n        key = cv2.waitKey(1)\n\n        if key == ord(\"w\"):\n            self.frame_proportion -= 0.05\n            self.frame_proportion = max(self.frame_proportion, 0.1)\n        elif key == ord(\"s\"):\n            self.frame_proportion += 0.05\n            self.frame_proportion = 
min(self.frame_proportion, 1.0)\n        elif key == ord(\"H\"):\n            self.frame_offset_x -= 1\n        elif key == ord(\"h\"):\n            self.frame_offset_x -= 5\n        elif key == ord(\"K\"):\n            self.frame_offset_x += 1\n        elif key == ord(\"k\"):\n            self.frame_offset_x += 5\n        elif key == ord(\"J\"):\n            self.frame_offset_y -= 1\n        elif key == ord(\"j\"):\n            self.frame_offset_y -= 5\n        elif key == ord(\"U\"):\n            self.frame_offset_y += 1\n        elif key == ord(\"u\"):\n            self.frame_offset_y += 5\n        elif key == ord(\"Z\"):\n            self.frame_offset_x = 0\n            self.frame_offset_y = 0\n            self.frame_proportion = 0.9\n        elif key == ord(\"x\"):\n            self.predictor.reset_frames()\n\n            if not self.is_calibrated:\n                cv2.namedWindow(\"FOMM\", cv2.WINDOW_GUI_NORMAL)\n                cv2.moveWindow(\"FOMM\", 600, 250)\n\n            self.is_calibrated = True\n            self.show_landmarks = False\n        elif key == ord(\"z\"):\n            self.overlay_alpha = max(self.overlay_alpha - 0.1, 0.0)\n        elif key == ord(\"c\"):\n            self.overlay_alpha = min(self.overlay_alpha + 0.1, 1.0)\n        elif key == ord(\"r\"):\n            self.preview_flip = not self.preview_flip\n        elif key == ord(\"t\"):\n            self.output_flip = not self.output_flip\n        elif key == ord(\"f\"):\n            self.find_keyframe = not self.find_keyframe\n        elif key == ord(\"o\"):\n            self.show_landmarks = not self.show_landmarks\n        elif key == 48:\n            self.passthrough = not self.passthrough\n        elif key != -1:\n            log(key)\n\n    def process_image(self, image, use_gpu=True, **kwargs) -> np.array:\n        if not self.offline:\n            self.handle_keyboard_input()\n\n        stream_img_size = image.shape[1], image.shape[0]\n\n        frame = image[..., ::-1]\n\n        frame, (frame_offset_x, frame_offset_y) = crop(\n            frame,\n            p=self.frame_proportion,\n            offset_x=self.frame_offset_x,\n            offset_y=self.frame_offset_y,\n        )\n\n        frame = resize(frame, (self.crop_size, self.crop_size))[..., :3]\n\n        if self.find_keyframe:\n            if is_new_frame_better(log, self.source_image, frame, self.predictor):\n                log(\"Taking new frame!\")\n                self.green_overlay = True\n                self.predictor.reset_frames()\n\n        if self.passthrough:\n            out = frame\n        elif self.is_calibrated:\n            out = self.predictor.predict(frame)\n            if out is None:\n                log(\"predict returned None\")\n        else:\n            out = None\n\n        if self.overlay_alpha > 0:\n            preview_frame = cv2.addWeighted(\n                self.source_image,\n                self.overlay_alpha,\n                frame,\n                1.0 - self.overlay_alpha,\n                0.0,\n            )\n        else:\n            preview_frame = frame.copy()\n\n        if self.show_landmarks:\n            # Dim the background to make it easier to see the landmarks\n            preview_frame = cv2.convertScaleAbs(preview_frame, alpha=0.5, beta=0.0)\n\n            draw_face_landmarks(\n                self.LANDMARK_SLICE_ARRAY, preview_frame, self.source_kp, (200, 20, 10)\n            )\n\n            frame_kp = self.predictor.get_frame_kp(frame)\n            
draw_face_landmarks(self.LANDMARK_SLICE_ARRAY, preview_frame, frame_kp)\n\n        preview_frame = cv2.flip(preview_frame, 1)\n\n        if self.green_overlay:\n            green_alpha = 0.8\n            overlay = preview_frame.copy()\n            overlay[:] = (0, 255, 0)\n            preview_frame = cv2.addWeighted(\n                preview_frame, green_alpha, overlay, 1.0 - green_alpha, 0.0\n            )\n\n        if self.find_keyframe:\n            preview_frame = cv2.putText(\n                preview_frame,\n                self.display_string,\n                (10, 220),\n                0,\n                0.5 * self.crop_size / 256,\n                (255, 255, 255),\n                1,\n            )\n\n        if not self.is_calibrated:\n            preview_frame = draw_calib_text(preview_frame)\n\n        elif self.show_landmarks:\n            preview_frame = draw_landmark_text(preview_frame)\n\n        if not self.opt_hide_rect:\n            draw_rect(preview_frame)\n\n        if not self.offline:\n            cv2.imshow(\"FOMM\", preview_frame[..., ::-1])\n\n        if out is not None:\n            if not self.opt_no_pad:\n                out = pad_img(out, stream_img_size)\n\n            if self.output_flip:\n                out = cv2.flip(out, 1)\n\n            return out[..., ::-1]\n        else:\n            return preview_frame[..., ::-1]\n"
  },
  {
    "path": "src/dot/fomm/predictor_local.py",
    "content": "#!/usr/bin/env python3\n\nimport numpy as np\nimport torch\nimport yaml\nfrom scipy.spatial import ConvexHull\n\nfrom . import face_alignment\nfrom .modules.generator_optim import OcclusionAwareGenerator\nfrom .modules.keypoint_detector import KPDetector\n\n\ndef normalize_kp(\n    kp_source,\n    kp_driving,\n    kp_driving_initial,\n    adapt_movement_scale=False,\n    use_relative_movement=False,\n    use_relative_jacobian=False,\n):\n\n    if adapt_movement_scale:\n        source_area = ConvexHull(kp_source[\"value\"][0].data.cpu().numpy()).volume\n        driving_area = ConvexHull(\n            kp_driving_initial[\"value\"][0].data.cpu().numpy()\n        ).volume\n        adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)\n    else:\n        adapt_movement_scale = 1\n\n    kp_new = {k: v for k, v in kp_driving.items()}\n\n    if use_relative_movement:\n        kp_value_diff = kp_driving[\"value\"] - kp_driving_initial[\"value\"]\n        kp_value_diff *= adapt_movement_scale\n        kp_new[\"value\"] = kp_value_diff + kp_source[\"value\"]\n\n        if use_relative_jacobian:\n            jacobian_diff = torch.matmul(\n                kp_driving[\"jacobian\"], torch.inverse(kp_driving_initial[\"jacobian\"])\n            )\n            kp_new[\"jacobian\"] = torch.matmul(jacobian_diff, kp_source[\"jacobian\"])\n\n    return kp_new\n\n\ndef to_tensor(a):\n    return torch.tensor(a[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) / 255\n\n\nclass PredictorLocal:\n    def __init__(\n        self,\n        config_path,\n        checkpoint_path,\n        relative=False,\n        adapt_movement_scale=False,\n        device=None,\n        enc_downscale=1,\n    ):\n\n        self.device = device or (\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        self.relative = relative\n        self.adapt_movement_scale = adapt_movement_scale\n        self.start_frame = None\n        self.start_frame_kp = None\n        self.kp_driving_initial = None\n        self.config_path = config_path\n        self.checkpoint_path = checkpoint_path\n        self.generator, self.kp_detector = self.load_checkpoints()\n        self.fa = face_alignment.FaceAlignment(\n            face_alignment.LandmarksType.TWO_D,\n            flip_input=True,\n            device=self.device,\n            face_detector_kwargs={\n                \"path_to_detector\": \"saved_models/face_alignment/s3fd-619a316812.pth\"\n            },\n        )\n        self.source = None\n        self.kp_source = None\n        self.enc_downscale = enc_downscale\n\n    def load_checkpoints(self):\n        with open(self.config_path) as f:\n            config = yaml.load(f, Loader=yaml.FullLoader)\n\n        generator = OcclusionAwareGenerator(\n            **config[\"model_params\"][\"generator_params\"],\n            **config[\"model_params\"][\"common_params\"]\n        )\n        generator.to(self.device)\n\n        kp_detector = KPDetector(\n            **config[\"model_params\"][\"kp_detector_params\"],\n            **config[\"model_params\"][\"common_params\"]\n        )\n        kp_detector.to(self.device)\n\n        checkpoint = torch.load(self.checkpoint_path, map_location=self.device)\n        generator.load_state_dict(checkpoint[\"generator\"])\n        kp_detector.load_state_dict(checkpoint[\"kp_detector\"])\n\n        generator.eval()\n        kp_detector.eval()\n\n        return generator, kp_detector\n\n    def reset_frames(self):\n        self.kp_driving_initial = None\n\n    def 
set_source_image(self, source_image):\n        self.source = to_tensor(source_image).to(self.device)\n        self.kp_source = self.kp_detector(self.source)\n\n        if self.enc_downscale > 1:\n            h = int(self.source.shape[2] / self.enc_downscale)\n            w = int(self.source.shape[3] / self.enc_downscale)\n            source_enc = torch.nn.functional.interpolate(\n                self.source, size=(h, w), mode=\"bilinear\"\n            )\n        else:\n            source_enc = self.source\n\n        self.generator.encode_source(source_enc)\n\n    def predict(self, driving_frame):\n        assert self.kp_source is not None, \"call set_source_image()\"\n\n        with torch.no_grad():\n            driving = to_tensor(driving_frame).to(self.device)\n\n            if self.kp_driving_initial is None:\n                self.kp_driving_initial = self.kp_detector(driving)\n                self.start_frame = driving_frame.copy()\n                self.start_frame_kp = self.get_frame_kp(driving_frame)\n\n            kp_driving = self.kp_detector(driving)\n            kp_norm = normalize_kp(\n                kp_source=self.kp_source,\n                kp_driving=kp_driving,\n                kp_driving_initial=self.kp_driving_initial,\n                use_relative_movement=self.relative,\n                use_relative_jacobian=self.relative,\n                adapt_movement_scale=self.adapt_movement_scale,\n            )\n\n            out = self.generator(\n                self.source, kp_source=self.kp_source, kp_driving=kp_norm\n            )\n\n            out = np.transpose(out[\"prediction\"].data.cpu().numpy(), [0, 2, 3, 1])[0]\n            out = (np.clip(out, 0, 1) * 255).astype(np.uint8)\n\n            return out\n\n    def get_frame_kp(self, image):\n        kp_landmarks = self.fa.get_landmarks(image)\n        if kp_landmarks:\n            kp_image = kp_landmarks[0]\n            kp_image = self.normalize_alignment_kp(kp_image)\n            return kp_image\n        else:\n            return None\n\n    @staticmethod\n    def normalize_alignment_kp(kp):\n        kp = kp - kp.mean(axis=0, keepdims=True)\n        area = ConvexHull(kp[:, :2]).volume\n        area = np.sqrt(area)\n        kp[:, :2] = kp[:, :2] / area\n        return kp\n\n    def get_start_frame(self):\n        return self.start_frame\n\n    def get_start_frame_kp(self):\n        return self.start_frame_kp\n"
  },
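  {
    "path": "examples/fomm_predictor_example.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"Hypothetical sketch of driving PredictorLocal directly, outside of FOMMOption.\n\nAll paths below (config, FOMM checkpoint, source image, driving video) are placeholders\nand are assumed to exist locally, including the s3fd checkpoint that PredictorLocal\nexpects under saved_models/face_alignment/.\"\"\"\n\nimport cv2\n\nfrom dot.fomm.predictor_local import PredictorLocal\n\npredictor = PredictorLocal(\n    config_path=\"src/dot/fomm/config/vox-adv-256.yaml\",  # placeholder path\n    checkpoint_path=\"saved_models/fomm/vox-adv-cpk.pth.tar\",  # placeholder path\n    relative=True,\n    adapt_movement_scale=True,\n)\n\n# Source face: read with OpenCV (BGR), resize to the 256x256 crop, convert to RGB.\nsource = cv2.resize(cv2.imread(\"data/source.png\"), (256, 256))[..., ::-1]\npredictor.set_source_image(source)\n\ncap = cv2.VideoCapture(\"data/driving.mp4\")\nwhile True:\n    ok, frame = cap.read()\n    if not ok:\n        break\n    frame = cv2.resize(frame, (256, 256))[..., ::-1]  # BGR -> RGB\n    out = predictor.predict(frame)  # RGB uint8 output frame\n    cv2.imshow(\"fomm\", out[..., ::-1])  # back to BGR for display\n    if cv2.waitKey(1) == 27:  # Esc to quit\n        break\ncap.release()\n"
  },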
  {
    "path": "src/dot/fomm/sync_batchnorm/__init__.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : __init__.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 27/01/2018\n#\n# This file is part of Synchronized-BatchNorm-PyTorch.\n# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n# Distributed under MIT License.\n"
  },
  {
    "path": "src/dot/fomm/sync_batchnorm/batchnorm.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : batchnorm.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 27/01/2018\n#\n# This file is part of Synchronized-BatchNorm-PyTorch.\n# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n# Distributed under MIT License.\n\nimport collections\n\nimport torch.nn.functional as F\nfrom torch.nn.modules.batchnorm import _BatchNorm\nfrom torch.nn.parallel._functions import Broadcast, ReduceAddCoalesced\n\nfrom .comm import SyncMaster\n\n__all__ = [\"SynchronizedBatchNorm2d\"]\n\n\ndef _sum_ft(tensor):\n    \"\"\"sum over the first and last dimention\"\"\"\n    return tensor.sum(dim=0).sum(dim=-1)\n\n\ndef _unsqueeze_ft(tensor):\n    \"\"\"add new dementions at the front and the tail\"\"\"\n    return tensor.unsqueeze(0).unsqueeze(-1)\n\n\n_ChildMessage = collections.namedtuple(\"_ChildMessage\", [\"sum\", \"ssum\", \"sum_size\"])\n\n_MasterMessage = collections.namedtuple(\"_MasterMessage\", [\"sum\", \"inv_std\"])\n\n\nclass _SynchronizedBatchNorm(_BatchNorm):\n    def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True):\n\n        super(_SynchronizedBatchNorm, self).__init__(\n            num_features, eps=eps, momentum=momentum, affine=affine\n        )\n\n        self._sync_master = SyncMaster(self._data_parallel_master)\n\n        self._is_parallel = False\n        self._parallel_id = None\n        self._slave_pipe = None\n\n    def forward(self, input):\n        # If it is not parallel computation or is in\n        # evaluation mode, use PyTorch's implementation.\n        if not (self._is_parallel and self.training):\n            return F.batch_norm(\n                input,\n                self.running_mean,\n                self.running_var,\n                self.weight,\n                self.bias,\n                self.training,\n                self.momentum,\n                self.eps,\n            )\n\n        # Resize the input to (B, C, -1).\n        input_shape = input.size()\n        input = input.view(input.size(0), self.num_features, -1)\n\n        # Compute the sum and square-sum.\n        sum_size = input.size(0) * input.size(2)\n        input_sum = _sum_ft(input)\n        input_ssum = _sum_ft(input**2)\n\n        # Reduce-and-broadcast the statistics.\n        if self._parallel_id == 0:\n            mean, inv_std = self._sync_master.run_master(\n                _ChildMessage(input_sum, input_ssum, sum_size)\n            )\n        else:\n            mean, inv_std = self._slave_pipe.run_slave(\n                _ChildMessage(input_sum, input_ssum, sum_size)\n            )\n\n        # Compute the output.\n        if self.affine:\n            # MJY:: Fuse the multiplication for speed.\n            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(\n                inv_std * self.weight\n            ) + _unsqueeze_ft(self.bias)\n        else:\n            output = (input - _unsqueeze_ft(mean)) * _unsqueeze_ft(inv_std)\n\n        # Reshape it.\n        return output.view(input_shape)\n\n    def __data_parallel_replicate__(self, ctx, copy_id):\n        self._is_parallel = True\n        self._parallel_id = copy_id\n\n        # parallel_id == 0 means master device.\n        if self._parallel_id == 0:\n            ctx.sync_master = self._sync_master\n        else:\n            self._slave_pipe = ctx.sync_master.register_slave(copy_id)\n\n    def _data_parallel_master(self, intermediates):\n        \"\"\"Reduce the sum and square-sum,\n        compute the 
statistics, and broadcast it.\"\"\"\n        # Always using same \"device order\" makes the\n        # ReduceAdd operation faster.\n        # Thanks to:: Tete Xiao (http://tetexiao.com/)\n        intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n\n        to_reduce = [i[1][:2] for i in intermediates]\n        to_reduce = [j for i in to_reduce for j in i]  # flatten\n        target_gpus = [i[1].sum.get_device() for i in intermediates]\n\n        sum_size = sum([i[1].sum_size for i in intermediates])\n        sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n        mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n\n        broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n\n        outputs = []\n        for i, rec in enumerate(intermediates):\n            outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2 : i * 2 + 2])))\n\n        return outputs\n\n    def _compute_mean_std(self, sum_, ssum, size):\n        \"\"\"Compute the mean and standard-deviation with\n        sum and square-sum. This method also maintains\n        the moving average on the master device.\"\"\"\n        assert size > 1, (\n            \"BatchNorm computes unbiased \"\n            \"standard-deviation, which requires size > 1.\"\n        )\n        mean = sum_ / size\n        sumvar = ssum - sum_ * mean\n        unbias_var = sumvar / (size - 1)\n        bias_var = sumvar / size\n\n        self.running_mean = (\n            1 - self.momentum\n        ) * self.running_mean + self.momentum * mean.data\n        self.running_var = (\n            1 - self.momentum\n        ) * self.running_var + self.momentum * unbias_var.data\n\n        return mean, bias_var.clamp(self.eps) ** -0.5\n\n\nclass SynchronizedBatchNorm2d(_SynchronizedBatchNorm):\n    r\"\"\"Applies Batch Normalization over a 4d input that is seen as a\n    mini-batch of 3d inputs\n\n    .. math::\n\n        y = \\frac{x - mean[x]}{ \\sqrt{Var[x] + \\epsilon}} * gamma + beta\n\n    This module differs from the built-in PyTorch BatchNorm2d as the mean and\n    standard-deviation are reduced across all devices during training.\n\n    For example, when one uses `nn.DataParallel` to wrap the network during\n    training, PyTorch's implementation normalize the tensor on each device\n    using the statistics only on that device, which accelerated the\n    computation and is also easy to implement, but the statistics might\n    be inaccurate.\n    Instead, in this synchronized version, the statistics will be computed\n    over all training samples distributed on multiple devices.\n\n    Note that, for one-GPU or CPU-only case, this module behaves exactly same\n    as the built-in PyTorch implementation.\n\n    The mean and standard-deviation are calculated per-dimension over\n    the mini-batches and gamma and beta are learnable parameter vectors\n    of size C (where C is the input size).\n\n    During training, this layer keeps a running estimate of its computed mean\n    and variance. 
The running sum is kept with a default momentum of 0.1.\n\n    During evaluation, this running mean/variance is used for normalization.\n\n    Because the BatchNorm is done over the `C` dimension, computing statistics\n    on `(N, H, W)` slices, it's common terminology to call this Spatial\n    BatchNorm\n\n    Args:\n        num_features: num_features from an expected input of\n            size batch_size x num_features x height x width\n        eps: a value added to the denominator for numerical stability.\n            Default: 1e-5\n        momentum: the value used for the running_mean and running_var\n            computation. Default: 0.1\n        affine: a boolean value that when set to ``True``,\n            gives the layer learnable\n            affine parameters. Default: ``True``\n\n    Shape:\n        - Input: :math:`(N, C, H, W)`\n        - Output: :math:`(N, C, H, W)` (same shape as input)\n\n    Examples:\n        >>> # With Learnable Parameters\n        >>> m = SynchronizedBatchNorm2d(100)\n        >>> # Without Learnable Parameters\n        >>> m = SynchronizedBatchNorm2d(100, affine=False)\n        >>> input = torch.autograd.Variable(torch.randn(20, 100, 35, 45))\n        >>> output = m(input)\n    \"\"\"\n\n    def _check_input_dim(self, input):\n        if input.dim() != 4:\n            raise ValueError(\"expected 4D input (got {}D input)\".format(input.dim()))\n        super(SynchronizedBatchNorm2d, self)._check_input_dim(input)\n"
  },
  {
    "path": "src/dot/fomm/sync_batchnorm/comm.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# File   : comm.py\n# Author : Jiayuan Mao\n# Email  : maojiayuan@gmail.com\n# Date   : 27/01/2018\n#\n# This file is part of Synchronized-BatchNorm-PyTorch.\n# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch\n# Distributed under MIT License.\n\nimport collections\nimport queue\nimport threading\n\n__all__ = [\"FutureResult\", \"SlavePipe\", \"SyncMaster\"]\n\n\nclass FutureResult(object):\n    \"\"\"\n    A thread-safe future implementation.\n    Used only as one-to-one pipe.\n    \"\"\"\n\n    def __init__(self):\n        self._result = None\n        self._lock = threading.Lock()\n        self._cond = threading.Condition(self._lock)\n\n    def put(self, result):\n        with self._lock:\n            assert self._result is None, \"Previous result has't been fetched.\"\n            self._result = result\n            self._cond.notify()\n\n    def get(self):\n        with self._lock:\n            if self._result is None:\n                self._cond.wait()\n\n            res = self._result\n            self._result = None\n            return res\n\n\n_MasterRegistry = collections.namedtuple(\"_MasterRegistry\", [\"result\"])\n_SlavePipeBase = collections.namedtuple(\n    \"_SlavePipeBase\", [\"identifier\", \"queue\", \"result\"]\n)\n\n\nclass SlavePipe(_SlavePipeBase):\n    \"\"\"\n    Pipe for master-slave communication.\n    \"\"\"\n\n    def run_slave(self, msg):\n        self.queue.put((self.identifier, msg))\n        ret = self.result.get()\n        self.queue.put(True)\n        return ret\n\n\nclass SyncMaster(object):\n    \"\"\"\n    An abstract `SyncMaster` object.\n\n    - During the replication, as the data parallel will\n    trigger an callback of each module, all slave devices should\n    call `register(id)` and obtain an `SlavePipe`\n    to communicate with the master.\n    - During the forward pass, master device invokes\n    `run_master`, all messages from slave devices\n    will be collected, and passed to a registered callback.\n    - After receiving the messages, the master device\n    should gather the information and determine\n    to message passed back to each slave devices.\n    \"\"\"\n\n    def __init__(self, master_callback):\n        \"\"\"\n        Args:\n            master_callback: a callback to be invoked\n            after having collected messages from slave devices.\n        \"\"\"\n        self._master_callback = master_callback\n        self._queue = queue.Queue()\n        self._registry = collections.OrderedDict()\n        self._activated = False\n\n    def __getstate__(self):\n        return {\"master_callback\": self._master_callback}\n\n    def __setstate__(self, state):\n        self.__init__(state[\"master_callback\"])\n\n    def register_slave(self, identifier):\n        \"\"\"\n        Register an slave device.\n\n        Args:\n            identifier: an identifier, usually is the device id.\n\n        Returns: a `SlavePipe` object which can be used\n                 to communicate with the master device.\n        \"\"\"\n        if self._activated:\n            assert self._queue.empty(), (\n                \"Queue is not clean \" \"before next initialization.\"\n            )\n            self._activated = False\n            self._registry.clear()\n        future = FutureResult()\n        self._registry[identifier] = _MasterRegistry(future)\n        return SlavePipe(identifier, self._queue, future)\n\n    def run_master(self, master_msg):\n        \"\"\"\n        Main 
entry for the master device in each forward pass.\n        The messages were first collected from each devices\n        (including the master device), and then an callback\n        will be invoked to compute the message to be sent\n        back to each devices (including the master device).\n\n        Args:\n            master_msg: the message that the master want to send\n            to itself. This will be placed as the first message\n            when calling `master_callback`.\n            For detailed usage, see `_SynchronizedBatchNorm`\n            for an example.\n\n        Returns: the message to be sent back to the master device.\n        \"\"\"\n        self._activated = True\n\n        intermediates = [(0, master_msg)]\n        for i in range(self.nr_slaves):\n            intermediates.append(self._queue.get())\n\n        results = self._master_callback(intermediates)\n        assert results[0][0] == 0, \"The first result \" \"should belongs to the master.\"\n\n        for i, res in results:\n            if i == 0:\n                continue\n            self._registry[i].result.put(res)\n\n        for i in range(self.nr_slaves):\n            assert self._queue.get() is True\n\n        return results[0][1]\n\n    @property\n    def nr_slaves(self):\n        return len(self._registry)\n"
  },
  {
    "path": "src/dot/gpen/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/__init_paths.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\n@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)\n@author: yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport os.path as osp\nimport sys\n\n\ndef add_path(path):\n    if path not in sys.path:\n        sys.path.insert(0, path)\n\n\nthis_dir = osp.dirname(__file__)\n\npath = osp.join(this_dir, \"retinaface\")\nadd_path(path)\n\npath = osp.join(this_dir, \"face_model\")\nadd_path(path)\n"
  },
  {
    "path": "src/dot/gpen/align_faces.py",
    "content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Mon Apr 24 15:43:29 2017\n@author: zhaoy\n\n@Modified by yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport cv2\nimport numpy as np\n\n# reference facial points, a list of coordinates (x,y)\nREFERENCE_FACIAL_POINTS = [\n    [30.29459953, 51.69630051],\n    [65.53179932, 51.50139999],\n    [48.02519989, 71.73660278],\n    [33.54930115, 92.3655014],\n    [62.72990036, 92.20410156],\n]\n\nDEFAULT_CROP_SIZE = (96, 112)\n\n\ndef _umeyama(src, dst, estimate_scale=True, scale=1.0):\n    \"\"\"Estimate N-D similarity transformation with or without scaling.\n    Parameters\n    ----------\n    src : (M, N) array\n        Source coordinates.\n    dst : (M, N) array\n        Destination coordinates.\n    estimate_scale : bool\n        Whether to estimate scaling factor.\n    Returns\n    -------\n    T : (N + 1, N + 1)\n        The homogeneous similarity transformation matrix. The matrix contains\n        NaN values only if the problem is not well-conditioned.\n    References\n    ----------\n    .. [1] \"Least-squares estimation of transformation parameters between two\n            point patterns\", Shinji Umeyama, PAMI 1991, :DOI:`10.1109/34.88573`\n    \"\"\"\n\n    num = src.shape[0]\n    dim = src.shape[1]\n\n    # Compute mean of src and dst.\n    src_mean = src.mean(axis=0)\n    dst_mean = dst.mean(axis=0)\n\n    # Subtract mean from src and dst.\n    src_demean = src - src_mean\n    dst_demean = dst - dst_mean\n\n    # Eq. (38).\n    A = dst_demean.T @ src_demean / num\n\n    # Eq. (39).\n    d = np.ones((dim,), dtype=np.double)\n    if np.linalg.det(A) < 0:\n        d[dim - 1] = -1\n\n    T = np.eye(dim + 1, dtype=np.double)\n\n    U, S, V = np.linalg.svd(A)\n\n    # Eq. (40) and (43).\n    rank = np.linalg.matrix_rank(A)\n    if rank == 0:\n        return np.nan * T\n    elif rank == dim - 1:\n        if np.linalg.det(U) * np.linalg.det(V) > 0:\n            T[:dim, :dim] = U @ V\n        else:\n            s = d[dim - 1]\n            d[dim - 1] = -1\n            T[:dim, :dim] = U @ np.diag(d) @ V\n            d[dim - 1] = s\n    else:\n        T[:dim, :dim] = U @ np.diag(d) @ V\n\n    if estimate_scale:\n        # Eq. 
(41) and (42).\n        scale = 1.0 / src_demean.var(axis=0).sum() * (S @ d)\n    else:\n        scale = scale\n\n    T[:dim, dim] = dst_mean - scale * (T[:dim, :dim] @ src_mean.T)\n    T[:dim, :dim] *= scale\n\n    return T, scale\n\n\nclass FaceWarpException(Exception):\n    def __str__(self):\n        return \"In File {}:{}\".format(__file__, super.__str__(self))\n\n\ndef get_reference_facial_points(\n    output_size=None,\n    inner_padding_factor=0.0,\n    outer_padding=(0, 0),\n    default_square=False,\n):\n    tmp_5pts = np.array(REFERENCE_FACIAL_POINTS)\n    tmp_crop_size = np.array(DEFAULT_CROP_SIZE)\n\n    # 0) make the inner region a square\n    if default_square:\n        size_diff = max(tmp_crop_size) - tmp_crop_size\n        tmp_5pts += size_diff / 2\n        tmp_crop_size += size_diff\n\n    if (\n        output_size\n        and output_size[0] == tmp_crop_size[0]\n        and output_size[1] == tmp_crop_size[1]\n    ):\n        print(\n            \"output_size == DEFAULT_CROP_SIZE {}: return default reference points\".format(\n                tmp_crop_size\n            )\n        )\n        return tmp_5pts\n\n    if inner_padding_factor == 0 and outer_padding == (0, 0):\n        if output_size is None:\n            print(\"No paddings to do: return default reference points\")\n            return tmp_5pts\n        else:\n            raise FaceWarpException(\n                \"No paddings to do, output_size must be None or {}\".format(\n                    tmp_crop_size\n                )\n            )\n\n    # check output size\n    if not (0 <= inner_padding_factor <= 1.0):\n        raise FaceWarpException(\"Not (0 <= inner_padding_factor <= 1.0)\")\n\n    if (\n        inner_padding_factor > 0 or outer_padding[0] > 0 or outer_padding[1] > 0\n    ) and output_size is None:\n        output_size = tmp_crop_size * (1 + inner_padding_factor * 2).astype(np.int32)\n        output_size += np.array(outer_padding)\n        print(\"              deduced from paddings, output_size = \", output_size)\n\n    if not (outer_padding[0] < output_size[0] and outer_padding[1] < output_size[1]):\n        raise FaceWarpException(\n            \"Not (outer_padding[0] < output_size[0]\"\n            \"and outer_padding[1] < output_size[1])\"\n        )\n\n    # 1) pad the inner region according inner_padding_factor\n    if inner_padding_factor > 0:\n        size_diff = tmp_crop_size * inner_padding_factor * 2\n        tmp_5pts += size_diff / 2\n        tmp_crop_size += np.round(size_diff).astype(np.int32)\n\n    # 2) resize the padded inner region\n    size_bf_outer_pad = np.array(output_size) - np.array(outer_padding) * 2\n\n    if (\n        size_bf_outer_pad[0] * tmp_crop_size[1]\n        != size_bf_outer_pad[1] * tmp_crop_size[0]\n    ):\n        raise FaceWarpException(\n            \"Must have (output_size - outer_padding)\"\n            \"= some_scale * (crop_size * (1.0 + inner_padding_factor)\"\n        )\n\n    scale_factor = size_bf_outer_pad[0].astype(np.float32) / tmp_crop_size[0]\n    tmp_5pts = tmp_5pts * scale_factor\n    tmp_crop_size = size_bf_outer_pad\n\n    # 3) add outer_padding to make output_size\n    reference_5point = tmp_5pts + np.array(outer_padding)\n    tmp_crop_size = output_size\n\n    return reference_5point\n\n\ndef get_affine_transform_matrix(src_pts, dst_pts):\n    tfm = np.float32([[1, 0, 0], [0, 1, 0]])\n    n_pts = src_pts.shape[0]\n    ones = np.ones((n_pts, 1), src_pts.dtype)\n    src_pts_ = np.hstack([src_pts, ones])\n    dst_pts_ = np.hstack([dst_pts, 
ones])\n\n    A, res, rank, s = np.linalg.lstsq(src_pts_, dst_pts_, rcond=None)\n\n    if rank == 3:\n        tfm = np.float32([[A[0, 0], A[1, 0], A[2, 0]], [A[0, 1], A[1, 1], A[2, 1]]])\n    elif rank == 2:\n        tfm = np.float32([[A[0, 0], A[1, 0], 0], [A[0, 1], A[1, 1], 0]])\n\n    return tfm\n\n\ndef warp_and_crop_face(\n    src_img, facial_pts, reference_pts=None, crop_size=(96, 112), align_type=\"similarity\"\n):  # similarity cv2_affine affine\n    if reference_pts is None:\n        if crop_size[0] == 96 and crop_size[1] == 112:\n            reference_pts = REFERENCE_FACIAL_POINTS\n        else:\n            default_square = False\n            inner_padding_factor = 0\n            outer_padding = (0, 0)\n            output_size = crop_size\n\n            reference_pts = get_reference_facial_points(\n                output_size, inner_padding_factor, outer_padding, default_square\n            )\n\n    ref_pts = np.float32(reference_pts)\n    ref_pts_shp = ref_pts.shape\n    if max(ref_pts_shp) < 3 or min(ref_pts_shp) != 2:\n        raise FaceWarpException(\"reference_pts.shape must be (K,2) or (2,K) and K>2\")\n\n    if ref_pts_shp[0] == 2:\n        ref_pts = ref_pts.T\n\n    src_pts = np.float32(facial_pts)\n    src_pts_shp = src_pts.shape\n    if max(src_pts_shp) < 3 or min(src_pts_shp) != 2:\n        raise FaceWarpException(\"facial_pts.shape must be (K,2) or (2,K) and K>2\")\n\n    if src_pts_shp[0] == 2:\n        src_pts = src_pts.T\n\n    if src_pts.shape != ref_pts.shape:\n        raise FaceWarpException(\"facial_pts and reference_pts must have the same shape\")\n\n    if align_type == \"cv2_affine\":\n        tfm = cv2.getAffineTransform(src_pts[0:3], ref_pts[0:3])\n        tfm_inv = cv2.getAffineTransform(ref_pts[0:3], src_pts[0:3])\n    elif align_type == \"affine\":\n        tfm = get_affine_transform_matrix(src_pts, ref_pts)\n        tfm_inv = get_affine_transform_matrix(ref_pts, src_pts)\n    else:\n        # default: similarity transform estimated with _umeyama\n        params, scale = _umeyama(src_pts, ref_pts)\n        tfm = params[:2, :]\n\n        params, _ = _umeyama(ref_pts, src_pts, False, scale=1.0 / scale)\n        tfm_inv = params[:2, :]\n\n    face_img = cv2.warpAffine(src_img, tfm, (crop_size[0], crop_size[1]), flags=3)\n\n    return face_img, tfm_inv\n"
  },
  {
    "path": "src/dot/gpen/face_enhancement.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\n@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)\n@author: yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport glob\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom .align_faces import get_reference_facial_points, warp_and_crop_face\nfrom .face_model.face_gan import FaceGAN\nfrom .retinaface.retinaface_detection import RetinaFaceDetection\n\n\nclass FaceEnhancement(object):\n    def __init__(\n        self,\n        base_dir=\"./\",\n        size=512,\n        model=None,\n        channel_multiplier=2,\n        narrow=1,\n        use_gpu=True,\n    ):\n        self.facedetector = RetinaFaceDetection(base_dir, use_gpu=use_gpu)\n        self.facegan = FaceGAN(\n            base_dir, size, model, channel_multiplier, narrow, use_gpu=use_gpu\n        )\n        self.size = size\n        self.threshold = 0.9\n\n        # the mask for pasting restored faces back\n        self.mask = np.zeros((512, 512), np.float32)\n        cv2.rectangle(self.mask, (26, 26), (486, 486), (1, 1, 1), -1, cv2.LINE_AA)\n        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)\n        self.mask = cv2.GaussianBlur(self.mask, (101, 101), 11)\n\n        self.kernel = np.array(\n            ([0.0625, 0.125, 0.0625], [0.125, 0.25, 0.125], [0.0625, 0.125, 0.0625]),\n            dtype=\"float32\",\n        )\n\n        # get the reference 5 landmarks position in the crop settings\n        default_square = True\n        inner_padding_factor = 0.25\n        outer_padding = (0, 0)\n        self.reference_5pts = get_reference_facial_points(\n            (self.size, self.size), inner_padding_factor, outer_padding, default_square\n        )\n\n    def process(self, img, use_gpu=True):\n        facebs, landms = self.facedetector.detect(img, use_gpu=use_gpu)\n        orig_faces, enhanced_faces = [], []\n        height, width = img.shape[:2]\n        full_mask = np.zeros((height, width), dtype=np.float32)\n        full_img = np.zeros(img.shape, dtype=np.uint8)\n\n        for i, (faceb, facial5points) in enumerate(zip(facebs, landms)):\n            if faceb[4] < self.threshold:\n                continue\n            fh, fw = (faceb[3] - faceb[1]), (faceb[2] - faceb[0])\n\n            facial5points = np.reshape(facial5points, (2, 5))\n\n            of, tfm_inv = warp_and_crop_face(\n                img,\n                facial5points,\n                reference_pts=self.reference_5pts,\n                crop_size=(self.size, self.size),\n            )\n\n            # enhance the face\n            ef = self.facegan.process(of, use_gpu=use_gpu)\n            orig_faces.append(of)\n            enhanced_faces.append(ef)\n\n            tmp_mask = self.mask\n            tmp_mask = cv2.resize(tmp_mask, ef.shape[:2])\n            tmp_mask = cv2.warpAffine(tmp_mask, tfm_inv, (width, height), flags=3)\n\n            if min(fh, fw) < 100:  # gaussian filter for small faces\n                ef = cv2.filter2D(ef, -1, self.kernel)\n\n            tmp_img = cv2.warpAffine(ef, tfm_inv, (width, height), flags=3)\n\n            mask = tmp_mask - full_mask\n            full_mask[np.where(mask > 0)] = tmp_mask[np.where(mask > 0)]\n            full_img[np.where(mask > 0)] = tmp_img[np.where(mask > 0)]\n\n        full_mask = full_mask[:, :, np.newaxis]\n        img = cv2.convertScaleAbs(img * (1 - full_mask) + full_img * full_mask)\n\n        return img, orig_faces, enhanced_faces\n\n\nif __name__ == \"__main__\":\n    # model = {'name':'GPEN-BFR-512', 'size':512, 
'channel_multiplier':2, 'narrow':1}\n    model = {\n        \"name\": \"GPEN-BFR-256\",\n        \"size\": 256,\n        \"channel_multiplier\": 1,\n        \"narrow\": 0.5,\n    }\n\n    indir = \"examples/imgs\"\n    outdir = \"examples/outs-BFR\"\n    os.makedirs(outdir, exist_ok=True)\n\n    faceenhancer = FaceEnhancement(\n        size=model[\"size\"],\n        model=model[\"name\"],\n        channel_multiplier=model[\"channel_multiplier\"],\n        narrow=model[\"narrow\"],\n    )\n\n    files = sorted(glob.glob(os.path.join(indir, \"*.*g\")))\n    for n, file in enumerate(files[:]):\n        filename = os.path.basename(file)\n\n        im = cv2.imread(file, cv2.IMREAD_COLOR)  # BGR\n        if not isinstance(im, np.ndarray):\n            print(filename, \"error\")\n            continue\n        im = cv2.resize(im, (0, 0), fx=2, fy=2)\n\n        img, orig_faces, enhanced_faces = faceenhancer.process(im)\n\n        cv2.imwrite(\n            os.path.join(outdir, \".\".join(filename.split(\".\")[:-1]) + \"_COMP.jpg\"),\n            np.hstack((im, img)),\n        )\n        cv2.imwrite(\n            os.path.join(outdir, \".\".join(filename.split(\".\")[:-1]) + \"_GPEN.jpg\"), img\n        )\n\n        for m, (ef, of) in enumerate(zip(enhanced_faces, orig_faces)):\n            of = cv2.resize(of, ef.shape[:2])\n            cv2.imwrite(\n                os.path.join(\n                    outdir,\n                    \".\".join(filename.split(\".\")[:-1]) + \"_face%02d\" % m + \".jpg\",\n                ),\n                np.hstack((of, ef)),\n            )\n\n        if n % 10 == 0:\n            print(n, filename)\n"
  },
  {
    "path": "src/dot/gpen/face_model/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/face_model/face_gan.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\n@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)\n@author: yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\n\nfrom .model import FullGenerator\n\n\nclass FaceGAN(object):\n    def __init__(\n        self,\n        base_dir=\"./\",\n        size=512,\n        model=None,\n        channel_multiplier=2,\n        narrow=1,\n        is_norm=True,\n        use_gpu=True,\n    ):\n        self.mfile = os.path.join(base_dir, \"weights\", model + \".pth\")\n        self.n_mlp = 8\n        self.is_norm = is_norm\n        self.resolution = size\n        self.device = (\n            (\"mps\" if torch.backends.mps.is_available() else \"cuda\")\n            if use_gpu\n            else \"cpu\"\n        )\n        self.load_model(\n            channel_multiplier=channel_multiplier, narrow=narrow, use_gpu=use_gpu\n        )\n\n    def load_model(self, channel_multiplier=2, narrow=1, use_gpu=True):\n        if use_gpu:\n            self.model = FullGenerator(\n                self.resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow\n            ).to(self.device)\n            pretrained_dict = torch.load(self.mfile, map_location=self.device)\n        else:\n            self.model = FullGenerator(\n                self.resolution, 512, self.n_mlp, channel_multiplier, narrow=narrow\n            ).cpu()\n            pretrained_dict = torch.load(self.mfile, map_location=torch.device(\"cpu\"))\n\n        self.model.load_state_dict(pretrained_dict)\n        self.model.eval()\n\n    def process(self, img, use_gpu=True):\n        img = cv2.resize(img, (self.resolution, self.resolution))\n        img_t = self.img2tensor(img, use_gpu)\n\n        with torch.no_grad():\n            out, __ = self.model(img_t)\n\n        out = self.tensor2img(out)\n\n        return out\n\n    def img2tensor(self, img, use_gpu=True):\n        if use_gpu:\n            img_t = torch.from_numpy(img).to(self.device) / 255.0\n        else:\n            img_t = torch.from_numpy(img).cpu() / 255.0\n        if self.is_norm:\n            img_t = (img_t - 0.5) / 0.5\n        img_t = img_t.permute(2, 0, 1).unsqueeze(0).flip(1)  # BGR->RGB\n        return img_t\n\n    def tensor2img(self, img_t, pmax=255.0, imtype=np.uint8):\n        if self.is_norm:\n            img_t = img_t * 0.5 + 0.5\n        img_t = img_t.squeeze(0).permute(1, 2, 0).flip(2)  # RGB->BGR\n        img_np = np.clip(img_t.float().cpu().numpy(), 0, 1) * pmax\n\n        return img_np.astype(imtype)\n"
  },
  {
    "path": "src/dot/gpen/face_model/model.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\n@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)\n@author: yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport itertools\nimport math\nimport random\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .op.fused_act_v2 import FusedLeakyReLU_v2 as FusedLeakyReLU\nfrom .op.fused_act_v2 import fused_leaky_relu_v2 as fused_leaky_relu\nfrom .op.upfirdn2d_v2 import upfirdn2d_v2 as upfirdn2d\n\n\nclass PixelNorm(nn.Module):\n    def __init__(self):\n        super().__init__()\n\n    def forward(self, input):\n        return input * torch.rsqrt(torch.mean(input**2, dim=1, keepdim=True) + 1e-8)\n\n\ndef make_kernel(k):\n    k = torch.tensor(k, dtype=torch.float32)\n\n    if k.ndim == 1:\n        k = k[None, :] * k[:, None]\n\n    k /= k.sum()\n\n    return k\n\n\nclass Upsample(nn.Module):\n    def __init__(self, kernel, factor=2):\n        super().__init__()\n\n        self.factor = factor\n        kernel = make_kernel(kernel) * (factor**2)\n        self.register_buffer(\"kernel\", kernel)\n\n        p = kernel.shape[0] - factor\n\n        pad0 = (p + 1) // 2 + factor - 1\n        pad1 = p // 2\n\n        self.pad = (pad0, pad1)\n\n    def forward(self, input):\n        out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad)\n\n        return out\n\n\nclass Downsample(nn.Module):\n    def __init__(self, kernel, factor=2):\n        super().__init__()\n\n        self.factor = factor\n        kernel = make_kernel(kernel)\n        self.register_buffer(\"kernel\", kernel)\n\n        p = kernel.shape[0] - factor\n\n        pad0 = (p + 1) // 2\n        pad1 = p // 2\n\n        self.pad = (pad0, pad1)\n\n    def forward(self, input):\n        out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad)\n\n        return out\n\n\nclass Blur(nn.Module):\n    def __init__(self, kernel, pad, upsample_factor=1):\n        super().__init__()\n\n        kernel = make_kernel(kernel)\n\n        if upsample_factor > 1:\n            kernel = kernel * (upsample_factor**2)\n\n        self.register_buffer(\"kernel\", kernel)\n\n        self.pad = pad\n\n    def forward(self, input):\n        out = upfirdn2d(input, self.kernel, pad=self.pad)\n\n        return out\n\n\nclass EqualConv2d(nn.Module):\n    def __init__(\n        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True\n    ):\n        super().__init__()\n\n        self.weight = nn.Parameter(\n            torch.randn(out_channel, in_channel, kernel_size, kernel_size)\n        )\n        self.scale = 1 / math.sqrt(in_channel * kernel_size**2)\n\n        self.stride = stride\n        self.padding = padding\n\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(out_channel))\n\n        else:\n            self.bias = None\n\n    def forward(self, input):\n        out = F.conv2d(\n            input,\n            self.weight * self.scale,\n            bias=self.bias,\n            stride=self.stride,\n            padding=self.padding,\n        )\n\n        return out\n\n    def __repr__(self):\n        return (\n            f\"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},\"\n            f\" {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})\"\n        )\n\n\nclass EqualLinear(nn.Module):\n    def __init__(\n        self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None\n    ):\n        super().__init__()\n\n        self.weight = 
nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))\n\n        if bias:\n            self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))\n\n        else:\n            self.bias = None\n\n        self.activation = activation\n\n        self.scale = (1 / math.sqrt(in_dim)) * lr_mul\n        self.lr_mul = lr_mul\n\n    def forward(self, input):\n        if self.activation:\n            out = F.linear(input, self.weight * self.scale)\n            out = fused_leaky_relu(out, self.bias * self.lr_mul)\n\n        else:\n            out = F.linear(\n                input, self.weight * self.scale, bias=self.bias * self.lr_mul\n            )\n\n        return out\n\n    def __repr__(self):\n        return (\n            f\"{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})\"\n        )\n\n\nclass ScaledLeakyReLU(nn.Module):\n    def __init__(self, negative_slope=0.2):\n        super().__init__()\n\n        self.negative_slope = negative_slope\n\n    def forward(self, input):\n        out = F.leaky_relu(input, negative_slope=self.negative_slope)\n\n        return out * math.sqrt(2)\n\n\nclass ModulatedConv2d(nn.Module):\n    def __init__(\n        self,\n        in_channel,\n        out_channel,\n        kernel_size,\n        style_dim,\n        demodulate=True,\n        upsample=False,\n        downsample=False,\n        blur_kernel=[1, 3, 3, 1],\n    ):\n        super().__init__()\n\n        self.eps = 1e-8\n        self.kernel_size = kernel_size\n        self.in_channel = in_channel\n        self.out_channel = out_channel\n        self.upsample = upsample\n        self.downsample = downsample\n\n        if upsample:\n            factor = 2\n            p = (len(blur_kernel) - factor) - (kernel_size - 1)\n            pad0 = (p + 1) // 2 + factor - 1\n            pad1 = p // 2 + 1\n\n            self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor)\n\n        if downsample:\n            factor = 2\n            p = (len(blur_kernel) - factor) + (kernel_size - 1)\n            pad0 = (p + 1) // 2\n            pad1 = p // 2\n\n            self.blur = Blur(blur_kernel, pad=(pad0, pad1))\n\n        fan_in = in_channel * kernel_size**2\n        self.scale = 1 / math.sqrt(fan_in)\n        self.padding = kernel_size // 2\n\n        self.weight = nn.Parameter(\n            torch.randn(1, out_channel, in_channel, kernel_size, kernel_size)\n        )\n\n        self.modulation = EqualLinear(style_dim, in_channel, bias_init=1)\n\n        self.demodulate = demodulate\n\n    def __repr__(self):\n        return (\n            f\"{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, \"\n            f\"upsample={self.upsample}, downsample={self.downsample})\"\n        )\n\n    def forward(self, input, style):\n        batch, in_channel, height, width = input.shape\n\n        style = self.modulation(style).view(batch, 1, in_channel, 1, 1)\n        weight = self.scale * self.weight * style\n\n        if self.demodulate:\n            demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)\n            weight = weight * demod.view(batch, self.out_channel, 1, 1, 1)\n\n        weight = weight.view(\n            batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size\n        )\n\n        if self.upsample:\n            input = input.view(1, batch * in_channel, height, width)\n            weight = weight.view(\n                batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size\n            )\n        
    weight = weight.transpose(1, 2).reshape(\n                batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size\n            )\n            out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)\n            _, _, height, width = out.shape\n            out = out.view(batch, self.out_channel, height, width)\n            out = self.blur(out)\n\n        elif self.downsample:\n            input = self.blur(input)\n            _, _, height, width = input.shape\n            input = input.view(1, batch * in_channel, height, width)\n            out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)\n            _, _, height, width = out.shape\n            out = out.view(batch, self.out_channel, height, width)\n\n        else:\n            input = input.view(1, batch * in_channel, height, width)\n            out = F.conv2d(input, weight, padding=self.padding, groups=batch)\n            _, _, height, width = out.shape\n            out = out.view(batch, self.out_channel, height, width)\n\n        return out\n\n\nclass NoiseInjection(nn.Module):\n    def __init__(self, isconcat=True):\n        super().__init__()\n\n        self.isconcat = isconcat\n        self.weight = nn.Parameter(torch.zeros(1))\n\n    def forward(self, image, noise=None):\n        if noise is None:\n            batch, _, height, width = image.shape\n            noise = image.new_empty(batch, 1, height, width).normal_()\n\n        if self.isconcat:\n            return torch.cat((image, self.weight * noise), dim=1)\n        else:\n            return image + self.weight * noise\n\n\nclass ConstantInput(nn.Module):\n    def __init__(self, channel, size=4):\n        super().__init__()\n\n        self.input = nn.Parameter(torch.randn(1, channel, size, size))\n\n    def forward(self, input):\n        batch = input.shape[0]\n        out = self.input.repeat(batch, 1, 1, 1)\n\n        return out\n\n\nclass StyledConv(nn.Module):\n    def __init__(\n        self,\n        in_channel,\n        out_channel,\n        kernel_size,\n        style_dim,\n        upsample=False,\n        blur_kernel=[1, 3, 3, 1],\n        demodulate=True,\n        isconcat=True,\n    ):\n        super().__init__()\n\n        self.conv = ModulatedConv2d(\n            in_channel,\n            out_channel,\n            kernel_size,\n            style_dim,\n            upsample=upsample,\n            blur_kernel=blur_kernel,\n            demodulate=demodulate,\n        )\n\n        self.noise = NoiseInjection(isconcat)\n        feat_multiplier = 2 if isconcat else 1\n        self.activate = FusedLeakyReLU(out_channel * feat_multiplier)\n\n    def forward(self, input, style, noise=None):\n        out = self.conv(input, style)\n        out = self.noise(out, noise=noise)\n        out = self.activate(out)\n\n        return out\n\n\nclass ToRGB(nn.Module):\n    def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]):\n        super().__init__()\n\n        if upsample:\n            self.upsample = Upsample(blur_kernel)\n\n        self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False)\n        self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))\n\n    def forward(self, input, style, skip=None):\n        out = self.conv(input, style)\n        out = out + self.bias\n\n        if skip is not None:\n            skip = self.upsample(skip)\n\n            out = out + skip\n\n        return out\n\n\nclass Generator(nn.Module):\n    def __init__(\n        self,\n        size,\n        
style_dim,\n        n_mlp,\n        channel_multiplier=2,\n        blur_kernel=[1, 3, 3, 1],\n        lr_mlp=0.01,\n        isconcat=True,\n        narrow=1,\n    ):\n        super().__init__()\n\n        self.size = size\n        self.n_mlp = n_mlp\n        self.style_dim = style_dim\n        self.feat_multiplier = 2 if isconcat else 1\n\n        layers = [PixelNorm()]\n\n        for i in range(n_mlp):\n            layers.append(\n                EqualLinear(\n                    style_dim, style_dim, lr_mul=lr_mlp, activation=\"fused_lrelu\"\n                )\n            )\n\n        self.style = nn.Sequential(*layers)\n\n        self.channels = {\n            4: int(512 * narrow),\n            8: int(512 * narrow),\n            16: int(512 * narrow),\n            32: int(512 * narrow),\n            64: int(256 * channel_multiplier * narrow),\n            128: int(128 * channel_multiplier * narrow),\n            256: int(64 * channel_multiplier * narrow),\n            512: int(32 * channel_multiplier * narrow),\n            1024: int(16 * channel_multiplier * narrow),\n        }\n\n        self.input = ConstantInput(self.channels[4])\n        self.conv1 = StyledConv(\n            self.channels[4],\n            self.channels[4],\n            3,\n            style_dim,\n            blur_kernel=blur_kernel,\n            isconcat=isconcat,\n        )\n        self.to_rgb1 = ToRGB(\n            self.channels[4] * self.feat_multiplier, style_dim, upsample=False\n        )\n\n        self.log_size = int(math.log(size, 2))\n\n        self.convs = nn.ModuleList()\n        self.upsamples = nn.ModuleList()\n        self.to_rgbs = nn.ModuleList()\n\n        in_channel = self.channels[4]\n\n        for i in range(3, self.log_size + 1):\n            out_channel = self.channels[2**i]\n\n            self.convs.append(\n                StyledConv(\n                    in_channel * self.feat_multiplier,\n                    out_channel,\n                    3,\n                    style_dim,\n                    upsample=True,\n                    blur_kernel=blur_kernel,\n                    isconcat=isconcat,\n                )\n            )\n\n            self.convs.append(\n                StyledConv(\n                    out_channel * self.feat_multiplier,\n                    out_channel,\n                    3,\n                    style_dim,\n                    blur_kernel=blur_kernel,\n                    isconcat=isconcat,\n                )\n            )\n\n            self.to_rgbs.append(ToRGB(out_channel * self.feat_multiplier, style_dim))\n\n            in_channel = out_channel\n\n        self.n_latent = self.log_size * 2 - 2\n\n    def make_noise(self):\n        device = self.input.input.device\n\n        noises = [torch.randn(1, 1, 2**2, 2**2, device=device)]\n\n        for i in range(3, self.log_size + 1):\n            for _ in range(2):\n                noises.append(torch.randn(1, 1, 2**i, 2**i, device=device))\n\n        return noises\n\n    def mean_latent(self, n_latent):\n        latent_in = torch.randn(\n            n_latent, self.style_dim, device=self.input.input.device\n        )\n        latent = self.style(latent_in).mean(0, keepdim=True)\n\n        return latent\n\n    def get_latent(self, input):\n        return self.style(input)\n\n    def forward(\n        self,\n        styles,\n        return_latents=False,\n        inject_index=None,\n        truncation=1,\n        truncation_latent=None,\n        input_is_latent=False,\n        noise=None,\n    ):\n        if not 
input_is_latent:\n            styles = [self.style(s) for s in styles]\n\n        if noise is None:\n            \"\"\"\n            noise = [None] * (2 * (self.log_size - 2) + 1)\n            \"\"\"\n            noise = []\n            batch = styles[0].shape[0]\n            for i in range(self.n_mlp + 1):\n                size = 2 ** (i + 2)\n                noise.append(\n                    torch.randn(\n                        batch, self.channels[size], size, size, device=styles[0].device\n                    )\n                )\n\n        if truncation < 1:\n            style_t = []\n\n            for style in styles:\n                style_t.append(\n                    truncation_latent + truncation * (style - truncation_latent)\n                )\n\n            styles = style_t\n\n        if len(styles) < 2:\n            inject_index = self.n_latent\n\n            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n\n        else:\n            if inject_index is None:\n                inject_index = random.randint(1, self.n_latent - 1)\n\n            latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1)\n            latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1)\n\n            latent = torch.cat([latent, latent2], 1)\n\n        out = self.input(latent)\n        out = self.conv1(out, latent[:, 0], noise=noise[0])\n\n        skip = self.to_rgb1(out, latent[:, 1])\n\n        i = 1\n        for conv1, conv2, noise1, noise2, to_rgb in zip(\n            self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs\n        ):\n            out = conv1(out, latent[:, i], noise=noise1)\n            out = conv2(out, latent[:, i + 1], noise=noise2)\n            skip = to_rgb(out, latent[:, i + 2], skip)\n\n            i += 2\n\n        image = skip\n\n        if return_latents:\n            return image, latent\n\n        else:\n            return image, None\n\n\nclass ConvLayer(nn.Sequential):\n    def __init__(\n        self,\n        in_channel,\n        out_channel,\n        kernel_size,\n        downsample=False,\n        blur_kernel=[1, 3, 3, 1],\n        bias=True,\n        activate=True,\n    ):\n        layers = []\n\n        if downsample:\n            factor = 2\n            p = (len(blur_kernel) - factor) + (kernel_size - 1)\n            pad0 = (p + 1) // 2\n            pad1 = p // 2\n\n            layers.append(Blur(blur_kernel, pad=(pad0, pad1)))\n\n            stride = 2\n            self.padding = 0\n\n        else:\n            stride = 1\n            self.padding = kernel_size // 2\n\n        layers.append(\n            EqualConv2d(\n                in_channel,\n                out_channel,\n                kernel_size,\n                padding=self.padding,\n                stride=stride,\n                bias=bias and not activate,\n            )\n        )\n\n        if activate:\n            if bias:\n                layers.append(FusedLeakyReLU(out_channel))\n\n            else:\n                layers.append(ScaledLeakyReLU(0.2))\n\n        super().__init__(*layers)\n\n\nclass ResBlock(nn.Module):\n    def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]):\n        super().__init__()\n\n        self.conv1 = ConvLayer(in_channel, in_channel, 3)\n        self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True)\n\n        self.skip = ConvLayer(\n            in_channel, out_channel, 1, downsample=True, activate=False, bias=False\n        )\n\n    def forward(self, input):\n        out = 
self.conv1(input)\n        out = self.conv2(out)\n\n        skip = self.skip(input)\n        out = (out + skip) / math.sqrt(2)\n\n        return out\n\n\nclass FullGenerator(nn.Module):\n    def __init__(\n        self,\n        size,\n        style_dim,\n        n_mlp,\n        channel_multiplier=2,\n        blur_kernel=[1, 3, 3, 1],\n        lr_mlp=0.01,\n        isconcat=True,\n        narrow=1,\n    ):\n        super().__init__()\n        channels = {\n            4: int(512 * narrow),\n            8: int(512 * narrow),\n            16: int(512 * narrow),\n            32: int(512 * narrow),\n            64: int(256 * channel_multiplier * narrow),\n            128: int(128 * channel_multiplier * narrow),\n            256: int(64 * channel_multiplier * narrow),\n            512: int(32 * channel_multiplier * narrow),\n            1024: int(16 * channel_multiplier * narrow),\n        }\n\n        self.log_size = int(math.log(size, 2))\n        self.generator = Generator(\n            size,\n            style_dim,\n            n_mlp,\n            channel_multiplier=channel_multiplier,\n            blur_kernel=blur_kernel,\n            lr_mlp=lr_mlp,\n            isconcat=isconcat,\n            narrow=narrow,\n        )\n\n        conv = [ConvLayer(3, channels[size], 1)]\n        self.ecd0 = nn.Sequential(*conv)\n        in_channel = channels[size]\n\n        self.names = [\"ecd%d\" % i for i in range(self.log_size - 1)]\n        for i in range(self.log_size, 2, -1):\n            out_channel = channels[2 ** (i - 1)]\n            conv = [ConvLayer(in_channel, out_channel, 3, downsample=True)]\n            setattr(self, self.names[self.log_size - i + 1], nn.Sequential(*conv))\n            in_channel = out_channel\n        self.final_linear = nn.Sequential(\n            EqualLinear(channels[4] * 4 * 4, style_dim, activation=\"fused_lrelu\")\n        )\n\n    def forward(\n        self,\n        inputs,\n        return_latents=False,\n        inject_index=None,\n        truncation=1,\n        truncation_latent=None,\n        input_is_latent=False,\n    ):\n        noise = []\n        for i in range(self.log_size - 1):\n            ecd = getattr(self, self.names[i])\n            inputs = ecd(inputs)\n            noise.append(inputs)\n\n        inputs = inputs.view(inputs.shape[0], -1)\n        outs = self.final_linear(inputs)\n        noise = list(\n            itertools.chain.from_iterable(itertools.repeat(x, 2) for x in noise)\n        )[::-1]\n        outs = self.generator(\n            [outs],\n            return_latents,\n            inject_index,\n            truncation,\n            truncation_latent,\n            input_is_latent,\n            noise=noise[1:],\n        )\n        return outs\n\n\nclass Discriminator(nn.Module):\n    def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1], narrow=1):\n        super().__init__()\n\n        channels = {\n            4: int(512 * narrow),\n            8: int(512 * narrow),\n            16: int(512 * narrow),\n            32: int(512 * narrow),\n            64: int(256 * channel_multiplier * narrow),\n            128: int(128 * channel_multiplier * narrow),\n            256: int(64 * channel_multiplier * narrow),\n            512: int(32 * channel_multiplier * narrow),\n            1024: int(16 * channel_multiplier * narrow),\n        }\n\n        convs = [ConvLayer(3, channels[size], 1)]\n\n        log_size = int(math.log(size, 2))\n\n        in_channel = channels[size]\n\n        for i in range(log_size, 2, -1):\n            
out_channel = channels[2 ** (i - 1)]\n\n            convs.append(ResBlock(in_channel, out_channel, blur_kernel))\n\n            in_channel = out_channel\n\n        self.convs = nn.Sequential(*convs)\n\n        self.stddev_group = 4\n        self.stddev_feat = 1\n\n        self.final_conv = ConvLayer(in_channel + 1, channels[4], 3)\n        self.final_linear = nn.Sequential(\n            EqualLinear(channels[4] * 4 * 4, channels[4], activation=\"fused_lrelu\"),\n            EqualLinear(channels[4], 1),\n        )\n\n    def forward(self, input):\n        out = self.convs(input)\n\n        batch, channel, height, width = out.shape\n        group = min(batch, self.stddev_group)\n        stddev = out.view(\n            group, -1, self.stddev_feat, channel // self.stddev_feat, height, width\n        )\n        stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8)\n        stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2)\n        stddev = stddev.repeat(group, 1, height, width)\n        out = torch.cat([out, stddev], 1)\n\n        out = self.final_conv(out)\n\n        out = out.view(batch, -1)\n        out = self.final_linear(out)\n        return out\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/__init__.py",
    "content": "#!/usr/bin/env python3\n\n# from .fused_act import FusedLeakyReLU, fused_leaky_relu\n# from .upfirdn2d import upfirdn2d\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/fused_act.py",
    "content": "#!/usr/bin/env python3\n\n# This file is no longer used\n\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Function\nfrom torch.utils.cpp_extension import load\n\nmodule_path = os.path.dirname(__file__)\n\ntry:\n    try:\n        fused = load(\n            \"fused\",\n            sources=[\n                os.path.join(module_path, \"fused_bias_act.cpp\"),\n                os.path.join(module_path, \"fused_bias_act_kernel.cu\"),\n            ],\n        )\n    except Exception as e:\n        print(e)\n        fused = load(\n            \"fused\",\n            sources=[\n                os.path.join(module_path, \"fused_bias_act.cpp\"),\n                os.path.join(module_path, \"fused_bias_act_kernel.cu\"),\n            ],\n        )\nexcept Exception as e:\n    print(e)\n    fused = load(\n        \"fused\",\n        sources=[\n            os.path.join(module_path, \"fused_bias_act.cpp\"),\n            os.path.join(module_path, \"fused_bias_act_kernel.cu\"),\n        ],\n    )\n\n\nclass FusedLeakyReLUFunctionBackward(Function):\n    @staticmethod\n    def forward(ctx, grad_output, out, negative_slope, scale):\n        ctx.save_for_backward(out)\n        ctx.negative_slope = negative_slope\n        ctx.scale = scale\n\n        empty = grad_output.new_empty(0)\n\n        grad_input = fused.fused_bias_act(\n            grad_output, empty, out, 3, 1, negative_slope, scale\n        )\n\n        dim = [0]\n\n        if grad_input.ndim > 2:\n            dim += list(range(2, grad_input.ndim))\n\n        grad_bias = grad_input.sum(dim).detach()\n\n        return grad_input, grad_bias\n\n    @staticmethod\n    def backward(ctx, gradgrad_input, gradgrad_bias):\n        (out,) = ctx.saved_tensors\n        gradgrad_out = fused.fused_bias_act(\n            gradgrad_input, gradgrad_bias, out, 3, 1, ctx.negative_slope, ctx.scale\n        )\n\n        return gradgrad_out, None, None, None\n\n\nclass FusedLeakyReLUFunction(Function):\n    @staticmethod\n    def forward(ctx, input, bias, negative_slope, scale):\n        empty = input.new_empty(0)\n        out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope, scale)\n        ctx.save_for_backward(out)\n        ctx.negative_slope = negative_slope\n        ctx.scale = scale\n\n        return out\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        (out,) = ctx.saved_tensors\n\n        grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(\n            grad_output, out, ctx.negative_slope, ctx.scale\n        )\n\n        return grad_input, grad_bias, None, None\n\n\nclass FusedLeakyReLU(nn.Module):\n    def __init__(self, channel, negative_slope=0.2, scale=2**0.5):\n        super().__init__()\n\n        self.bias = nn.Parameter(torch.zeros(channel))\n        self.negative_slope = negative_slope\n        self.scale = scale\n\n    def forward(self, input):\n        return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)\n\n\ndef fused_leaky_relu(input, bias, negative_slope=0.2, scale=2**0.5):\n    return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/fused_act_v2.py",
    "content": "#!/usr/bin/env python3\n\nimport os\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nmodule_path = os.path.dirname(__file__)\n\n\nclass FusedLeakyReLU_v2(nn.Module):\n    def __init__(self, channel, negative_slope=0.2, scale=2**0.5):\n        super().__init__()\n\n        self.bias = nn.Parameter(torch.zeros(channel))\n        self.negative_slope = negative_slope\n        self.scale = scale\n\n    def forward(self, input):\n        return fused_leaky_relu_v2(input, self.bias, self.negative_slope, self.scale)\n\n\ndef fused_leaky_relu_v2(input, bias, negative_slope=0.2, scale=2**0.5):\n    rest_dim = [1] * (input.ndim - bias.ndim - 1)\n    if input.ndim == 3:\n        return (\n            F.leaky_relu(\n                input + bias.view(1, *rest_dim, bias.shape[0]),\n                negative_slope=negative_slope,\n            )\n            * scale\n        )\n    else:\n        return (\n            F.leaky_relu(\n                input + bias.view(1, bias.shape[0], *rest_dim),\n                negative_slope=negative_slope,\n            )\n            * scale\n        )\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/fused_bias_act.cpp",
    "content": "// This file is no longer used\n\n#include <torch/extension.h>\n\n\ntorch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,\n    int act, int grad, float alpha, float scale);\n\n#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x \" must be a CUDA tensor\")\n#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x \" must be contiguous\")\n#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)\n\ntorch::Tensor fused_bias_act(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,\n    int act, int grad, float alpha, float scale) {\n    CHECK_CUDA(input);\n    CHECK_CUDA(bias);\n\n    return fused_bias_act_op(input, bias, refer, act, grad, alpha, scale);\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"fused_bias_act\", &fused_bias_act, \"fused bias act (CUDA)\");\n}\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/fused_bias_act_kernel.cu",
    "content": "// This file is no longer used\n\n// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n//\n// This work is made available under the Nvidia Source Code License-NC.\n// To view a copy of this license, visit\n// https://nvlabs.github.io/stylegan2/license.html\n\n#include <torch/types.h>\n\n#include <ATen/ATen.h>\n#include <ATen/AccumulateType.h>\n#include <ATen/cuda/CUDAContext.h>\n#include <ATen/cuda/CUDAApplyUtils.cuh>\n\n#include <cuda.h>\n#include <cuda_runtime.h>\n\n\ntemplate <typename scalar_t>\nstatic __global__ void fused_bias_act_kernel(scalar_t* out, const scalar_t* p_x, const scalar_t* p_b, const scalar_t* p_ref,\n    int act, int grad, scalar_t alpha, scalar_t scale, int loop_x, int size_x, int step_b, int size_b, int use_bias, int use_ref) {\n    int xi = blockIdx.x * loop_x * blockDim.x + threadIdx.x;\n\n    scalar_t zero = 0.0;\n\n    for (int loop_idx = 0; loop_idx < loop_x && xi < size_x; loop_idx++, xi += blockDim.x) {\n        scalar_t x = p_x[xi];\n\n        if (use_bias) {\n            x += p_b[(xi / step_b) % size_b];\n        }\n\n        scalar_t ref = use_ref ? p_ref[xi] : zero;\n\n        scalar_t y;\n\n        switch (act * 10 + grad) {\n            default:\n            case 10: y = x; break;\n            case 11: y = x; break;\n            case 12: y = 0.0; break;\n\n            case 30: y = (x > 0.0) ? x : x * alpha; break;\n            case 31: y = (ref > 0.0) ? x : x * alpha; break;\n            case 32: y = 0.0; break;\n        }\n\n        out[xi] = y * scale;\n    }\n}\n\n\ntorch::Tensor fused_bias_act_op(const torch::Tensor& input, const torch::Tensor& bias, const torch::Tensor& refer,\n    int act, int grad, float alpha, float scale) {\n    int curDevice = -1;\n    cudaGetDevice(&curDevice);\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);\n\n    auto x = input.contiguous();\n    auto b = bias.contiguous();\n    auto ref = refer.contiguous();\n\n    int use_bias = b.numel() ? 1 : 0;\n    int use_ref = ref.numel() ? 1 : 0;\n\n    int size_x = x.numel();\n    int size_b = b.numel();\n    int step_b = 1;\n\n    for (int i = 1 + 1; i < x.dim(); i++) {\n        step_b *= x.size(i);\n    }\n\n    int loop_x = 4;\n    int block_size = 4 * 32;\n    int grid_size = (size_x - 1) / (loop_x * block_size) + 1;\n\n    auto y = torch::empty_like(x);\n\n    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), \"fused_bias_act_kernel\", [&] {\n        fused_bias_act_kernel<scalar_t><<<grid_size, block_size, 0, stream>>>(\n            y.data_ptr<scalar_t>(),\n            x.data_ptr<scalar_t>(),\n            b.data_ptr<scalar_t>(),\n            ref.data_ptr<scalar_t>(),\n            act,\n            grad,\n            alpha,\n            scale,\n            loop_x,\n            size_x,\n            step_b,\n            size_b,\n            use_bias,\n            use_ref\n        );\n    });\n\n    return y;\n}\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/upfirdn2d.cpp",
    "content": "// This file is no longer used\n\n#include <torch/extension.h>\n\n\ntorch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,\n                            int up_x, int up_y, int down_x, int down_y,\n                            int pad_x0, int pad_x1, int pad_y0, int pad_y1);\n\n#define CHECK_CUDA(x) TORCH_CHECK(x.type().is_cuda(), #x \" must be a CUDA tensor\")\n#define CHECK_CONTIGUOUS(x) TORCH_CHECK(x.is_contiguous(), #x \" must be contiguous\")\n#define CHECK_INPUT(x) CHECK_CUDA(x); CHECK_CONTIGUOUS(x)\n\ntorch::Tensor upfirdn2d(const torch::Tensor& input, const torch::Tensor& kernel,\n                        int up_x, int up_y, int down_x, int down_y,\n                        int pad_x0, int pad_x1, int pad_y0, int pad_y1) {\n    CHECK_CUDA(input);\n    CHECK_CUDA(kernel);\n\n    return upfirdn2d_op(input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1);\n}\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"upfirdn2d\", &upfirdn2d, \"upfirdn2d (CUDA)\");\n}\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/upfirdn2d.py",
    "content": "#!/usr/bin/env python3\n\n# This file is no longer used\n\nimport os\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.utils.cpp_extension import load\n\nmodule_path = os.path.dirname(__file__)\nupfirdn2d_op = load(\n    \"upfirdn2d\",\n    sources=[\n        os.path.join(module_path, \"upfirdn2d.cpp\"),\n        os.path.join(module_path, \"upfirdn2d_kernel.cu\"),\n    ],\n)\n\n\nclass UpFirDn2dBackward(Function):\n    @staticmethod\n    def forward(\n        ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad, in_size, out_size\n    ):\n\n        up_x, up_y = up\n        down_x, down_y = down\n        g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad\n\n        grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)\n\n        grad_input = upfirdn2d_op.upfirdn2d(\n            grad_output,\n            grad_kernel,\n            down_x,\n            down_y,\n            up_x,\n            up_y,\n            g_pad_x0,\n            g_pad_x1,\n            g_pad_y0,\n            g_pad_y1,\n        )\n        grad_input = grad_input.view(in_size[0], in_size[1], in_size[2], in_size[3])\n\n        ctx.save_for_backward(kernel)\n\n        pad_x0, pad_x1, pad_y0, pad_y1 = pad\n\n        ctx.up_x = up_x\n        ctx.up_y = up_y\n        ctx.down_x = down_x\n        ctx.down_y = down_y\n        ctx.pad_x0 = pad_x0\n        ctx.pad_x1 = pad_x1\n        ctx.pad_y0 = pad_y0\n        ctx.pad_y1 = pad_y1\n        ctx.in_size = in_size\n        ctx.out_size = out_size\n\n        return grad_input\n\n    @staticmethod\n    def backward(ctx, gradgrad_input):\n        (kernel,) = ctx.saved_tensors\n\n        gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.in_size[3], 1)\n\n        gradgrad_out = upfirdn2d_op.upfirdn2d(\n            gradgrad_input,\n            kernel,\n            ctx.up_x,\n            ctx.up_y,\n            ctx.down_x,\n            ctx.down_y,\n            ctx.pad_x0,\n            ctx.pad_x1,\n            ctx.pad_y0,\n            ctx.pad_y1,\n        )\n        gradgrad_out = gradgrad_out.view(\n            ctx.in_size[0], ctx.in_size[1], ctx.out_size[0], ctx.out_size[1]\n        )\n\n        return gradgrad_out, None, None, None, None, None, None, None, None\n\n\nclass UpFirDn2d(Function):\n    @staticmethod\n    def forward(ctx, input, kernel, up, down, pad):\n        up_x, up_y = up\n        down_x, down_y = down\n        pad_x0, pad_x1, pad_y0, pad_y1 = pad\n\n        kernel_h, kernel_w = kernel.shape\n        batch, channel, in_h, in_w = input.shape\n        ctx.in_size = input.shape\n\n        input = input.reshape(-1, in_h, in_w, 1)\n\n        ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))\n\n        out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1\n        out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1\n        ctx.out_size = (out_h, out_w)\n\n        ctx.up = (up_x, up_y)\n        ctx.down = (down_x, down_y)\n        ctx.pad = (pad_x0, pad_x1, pad_y0, pad_y1)\n\n        g_pad_x0 = kernel_w - pad_x0 - 1\n        g_pad_y0 = kernel_h - pad_y0 - 1\n        g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1\n        g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1\n\n        ctx.g_pad = (g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)\n\n        out = upfirdn2d_op.upfirdn2d(\n            input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1\n        )\n        out = out.view(-1, channel, out_h, out_w)\n\n        
return out\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        kernel, grad_kernel = ctx.saved_tensors\n\n        grad_input = UpFirDn2dBackward.apply(\n            grad_output,\n            kernel,\n            grad_kernel,\n            ctx.up,\n            ctx.down,\n            ctx.pad,\n            ctx.g_pad,\n            ctx.in_size,\n            ctx.out_size,\n        )\n\n        return grad_input, None, None, None, None\n\n\ndef upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):\n    out = UpFirDn2d.apply(\n        input, kernel, (up, up), (down, down), (pad[0], pad[1], pad[0], pad[1])\n    )\n\n    return out\n\n\ndef upfirdn2d_native(\n    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1\n):\n    _, in_h, in_w, minor = input.shape\n    kernel_h, kernel_w = kernel.shape\n\n    out = input.view(-1, in_h, 1, in_w, 1, minor)\n    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])\n    out = out.view(-1, in_h * up_y, in_w * up_x, minor)\n\n    out = F.pad(\n        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]\n    )\n    out = out[\n        :,\n        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),\n        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),\n        :,\n    ]\n\n    out = out.permute(0, 3, 1, 2)\n    out = out.reshape(\n        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]\n    )\n    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)\n    out = F.conv2d(out, w)\n    out = out.reshape(\n        -1,\n        minor,\n        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,\n        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,\n    )\n    out = out.permute(0, 2, 3, 1)\n\n    return out[:, ::down_y, ::down_x, :]\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/upfirdn2d_kernel.cu",
    "content": "// This file is no longer used\n\n// Copyright (c) 2019, NVIDIA Corporation. All rights reserved.\n//\n// This work is made available under the Nvidia Source Code License-NC.\n// To view a copy of this license, visit\n// https://nvlabs.github.io/stylegan2/license.html\n\n#include <torch/types.h>\n\n#include <ATen/ATen.h>\n#include <ATen/AccumulateType.h>\n#include <ATen/cuda/CUDAContext.h>\n#include <ATen/cuda/CUDAApplyUtils.cuh>\n\n#include <cuda.h>\n#include <cuda_runtime.h>\n\n\nstatic __host__ __device__ __forceinline__ int floor_div(int a, int b) {\n    int c = a / b;\n\n    if (c * b > a) {\n        c--;\n    }\n\n    return c;\n}\n\n\nstruct UpFirDn2DKernelParams {\n    int up_x;\n    int up_y;\n    int down_x;\n    int down_y;\n    int pad_x0;\n    int pad_x1;\n    int pad_y0;\n    int pad_y1;\n\n    int major_dim;\n    int in_h;\n    int in_w;\n    int minor_dim;\n    int kernel_h;\n    int kernel_w;\n    int out_h;\n    int out_w;\n    int loop_major;\n    int loop_x;\n};\n\n\ntemplate <typename scalar_t, int up_x, int up_y, int down_x, int down_y, int kernel_h, int kernel_w, int tile_out_h, int tile_out_w>\n__global__ void upfirdn2d_kernel(scalar_t* out, const scalar_t* input, const scalar_t* kernel, const UpFirDn2DKernelParams p) {\n    const int tile_in_h = ((tile_out_h - 1) * down_y + kernel_h - 1) / up_y + 1;\n    const int tile_in_w = ((tile_out_w - 1) * down_x + kernel_w - 1) / up_x + 1;\n\n    __shared__ volatile float sk[kernel_h][kernel_w];\n    __shared__ volatile float sx[tile_in_h][tile_in_w];\n\n    int minor_idx = blockIdx.x;\n    int tile_out_y = minor_idx / p.minor_dim;\n    minor_idx -= tile_out_y * p.minor_dim;\n    tile_out_y *= tile_out_h;\n    int tile_out_x_base = blockIdx.y * p.loop_x * tile_out_w;\n    int major_idx_base = blockIdx.z * p.loop_major;\n\n    if (tile_out_x_base >= p.out_w | tile_out_y >= p.out_h | major_idx_base >= p.major_dim) {\n        return;\n    }\n\n    for (int tap_idx = threadIdx.x; tap_idx < kernel_h * kernel_w; tap_idx += blockDim.x) {\n        int ky = tap_idx / kernel_w;\n        int kx = tap_idx - ky * kernel_w;\n        scalar_t v = 0.0;\n\n        if (kx < p.kernel_w & ky < p.kernel_h) {\n            v = kernel[(p.kernel_h - 1 - ky) * p.kernel_w + (p.kernel_w - 1 - kx)];\n        }\n\n        sk[ky][kx] = v;\n    }\n\n    for (int loop_major = 0, major_idx = major_idx_base; loop_major < p.loop_major & major_idx < p.major_dim; loop_major++, major_idx++) {\n        for (int loop_x = 0, tile_out_x = tile_out_x_base; loop_x < p.loop_x & tile_out_x < p.out_w; loop_x++, tile_out_x += tile_out_w) {\n            int tile_mid_x = tile_out_x * down_x + up_x - 1 - p.pad_x0;\n            int tile_mid_y = tile_out_y * down_y + up_y - 1 - p.pad_y0;\n            int tile_in_x = floor_div(tile_mid_x, up_x);\n            int tile_in_y = floor_div(tile_mid_y, up_y);\n\n            __syncthreads();\n\n            for (int in_idx = threadIdx.x; in_idx < tile_in_h * tile_in_w; in_idx += blockDim.x) {\n                int rel_in_y = in_idx / tile_in_w;\n                int rel_in_x = in_idx - rel_in_y * tile_in_w;\n                int in_x = rel_in_x + tile_in_x;\n                int in_y = rel_in_y + tile_in_y;\n\n                scalar_t v = 0.0;\n\n                if (in_x >= 0 & in_y >= 0 & in_x < p.in_w & in_y < p.in_h) {\n                    v = input[((major_idx * p.in_h + in_y) * p.in_w + in_x) * p.minor_dim + minor_idx];\n                }\n\n                sx[rel_in_y][rel_in_x] = v;\n            }\n\n            
__syncthreads();\n            for (int out_idx = threadIdx.x; out_idx < tile_out_h * tile_out_w; out_idx += blockDim.x) {\n                int rel_out_y = out_idx / tile_out_w;\n                int rel_out_x = out_idx - rel_out_y * tile_out_w;\n                int out_x = rel_out_x + tile_out_x;\n                int out_y = rel_out_y + tile_out_y;\n\n                int mid_x = tile_mid_x + rel_out_x * down_x;\n                int mid_y = tile_mid_y + rel_out_y * down_y;\n                int in_x = floor_div(mid_x, up_x);\n                int in_y = floor_div(mid_y, up_y);\n                int rel_in_x = in_x - tile_in_x;\n                int rel_in_y = in_y - tile_in_y;\n                int kernel_x = (in_x + 1) * up_x - mid_x - 1;\n                int kernel_y = (in_y + 1) * up_y - mid_y - 1;\n\n                scalar_t v = 0.0;\n\n                #pragma unroll\n                for (int y = 0; y < kernel_h / up_y; y++)\n                    #pragma unroll\n                    for (int x = 0; x < kernel_w / up_x; x++)\n                        v += sx[rel_in_y + y][rel_in_x + x] * sk[kernel_y + y * up_y][kernel_x + x * up_x];\n\n                if (out_x < p.out_w & out_y < p.out_h) {\n                    out[((major_idx * p.out_h + out_y) * p.out_w + out_x) * p.minor_dim + minor_idx] = v;\n                }\n            }\n        }\n    }\n}\n\n\ntorch::Tensor upfirdn2d_op(const torch::Tensor& input, const torch::Tensor& kernel,\n    int up_x, int up_y, int down_x, int down_y,\n    int pad_x0, int pad_x1, int pad_y0, int pad_y1) {\n    int curDevice = -1;\n    cudaGetDevice(&curDevice);\n    cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);\n\n    UpFirDn2DKernelParams p;\n\n    auto x = input.contiguous();\n    auto k = kernel.contiguous();\n\n    p.major_dim = x.size(0);\n    p.in_h = x.size(1);\n    p.in_w = x.size(2);\n    p.minor_dim = x.size(3);\n    p.kernel_h = k.size(0);\n    p.kernel_w = k.size(1);\n    p.up_x = up_x;\n    p.up_y = up_y;\n    p.down_x = down_x;\n    p.down_y = down_y;\n    p.pad_x0 = pad_x0;\n    p.pad_x1 = pad_x1;\n    p.pad_y0 = pad_y0;\n    p.pad_y1 = pad_y1;\n\n    p.out_h = (p.in_h * p.up_y + p.pad_y0 + p.pad_y1 - p.kernel_h + p.down_y) / p.down_y;\n    p.out_w = (p.in_w * p.up_x + p.pad_x0 + p.pad_x1 - p.kernel_w + p.down_x) / p.down_x;\n\n    auto out = at::empty({p.major_dim, p.out_h, p.out_w, p.minor_dim}, x.options());\n\n    int mode = -1;\n\n    int tile_out_h;\n    int tile_out_w;\n\n    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {\n        mode = 1;\n        tile_out_h = 16;\n        tile_out_w = 64;\n    }\n\n    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 3 && p.kernel_w <= 3) {\n        mode = 2;\n        tile_out_h = 16;\n        tile_out_w = 64;\n    }\n\n    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 4 && p.kernel_w <= 4) {\n        mode = 3;\n        tile_out_h = 16;\n        tile_out_w = 64;\n    }\n\n    if (p.up_x == 2 && p.up_y == 2 && p.down_x == 1 && p.down_y == 1 && p.kernel_h <= 2 && p.kernel_w <= 2) {\n        mode = 4;\n        tile_out_h = 16;\n        tile_out_w = 64;\n    }\n\n    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 4 && p.kernel_w <= 4) {\n        mode = 5;\n        tile_out_h = 8;\n        tile_out_w = 32;\n    }\n\n    if (p.up_x == 1 && p.up_y == 1 && p.down_x == 2 && p.down_y == 2 && p.kernel_h <= 2 && p.kernel_w <= 2) {\n      
  mode = 6;\n        tile_out_h = 8;\n        tile_out_w = 32;\n    }\n\n    dim3 block_size;\n    dim3 grid_size;\n\n    if (tile_out_h > 0 && tile_out_w) {\n        p.loop_major = (p.major_dim - 1) / 16384 + 1;\n        p.loop_x = 1;\n        block_size = dim3(32 * 8, 1, 1);\n        grid_size = dim3(((p.out_h - 1) / tile_out_h + 1) * p.minor_dim,\n                         (p.out_w - 1) / (p.loop_x * tile_out_w) + 1,\n                         (p.major_dim - 1) / p.loop_major + 1);\n    }\n\n    AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), \"upfirdn2d_cuda\", [&] {\n        switch (mode) {\n        case 1:\n            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n\n        case 2:\n            upfirdn2d_kernel<scalar_t, 1, 1, 1, 1, 3, 3, 16, 64><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n\n        case 3:\n            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 4, 4, 16, 64><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n\n        case 4:\n            upfirdn2d_kernel<scalar_t, 2, 2, 1, 1, 2, 2, 16, 64><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n\n        case 5:\n            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n\n        case 6:\n            upfirdn2d_kernel<scalar_t, 1, 1, 2, 2, 4, 4, 8, 32><<<grid_size, block_size, 0, stream>>>(\n                out.data_ptr<scalar_t>(), x.data_ptr<scalar_t>(), k.data_ptr<scalar_t>(), p\n            );\n\n            break;\n        }\n    });\n\n    return out;\n}\n"
  },
  {
    "path": "src/dot/gpen/face_model/op/upfirdn2d_v2.py",
    "content": "#!/usr/bin/env python3\n\nimport os\n\nimport torch\nfrom torch.nn import functional as F\n\nmodule_path = os.path.dirname(__file__)\n\n\ndef upfirdn2d_v2(input, kernel, up=1, down=1, pad=(0, 0)):\n    out = upfirdn2d_native(\n        input, kernel, up, up, down, down, pad[0], pad[1], pad[0], pad[1]\n    )\n\n    return out\n\n\ndef upfirdn2d_native(\n    input, kernel, up_x, up_y, down_x, down_y, pad_x0, pad_x1, pad_y0, pad_y1\n):\n    _, channel, in_h, in_w = input.shape\n    input = input.reshape(-1, in_h, in_w, 1)\n\n    _, in_h, in_w, minor = input.shape\n    kernel_h, kernel_w = kernel.shape\n\n    out = input.view(-1, in_h, 1, in_w, 1, minor)\n    out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])\n    out = out.view(-1, in_h * up_y, in_w * up_x, minor)\n\n    out = F.pad(\n        out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0), max(pad_y1, 0)]\n    )\n    out = out[\n        :,\n        max(-pad_y0, 0) : out.shape[1] - max(-pad_y1, 0),\n        max(-pad_x0, 0) : out.shape[2] - max(-pad_x1, 0),\n        :,\n    ]\n\n    out = out.permute(0, 3, 1, 2)\n    out = out.reshape(\n        [-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x + pad_x0 + pad_x1]\n    )\n    w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)\n    out = F.conv2d(out, w)\n    out = out.reshape(\n        -1,\n        minor,\n        in_h * up_y + pad_y0 + pad_y1 - kernel_h + 1,\n        in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1,\n    )\n    out = out.permute(0, 2, 3, 1)\n    out = out[:, ::down_y, ::down_x, :]\n\n    out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1\n    out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1\n\n    return out.view(-1, channel, out_h, out_w)\n"
  },
  {
    "path": "src/dot/gpen/retinaface/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/retinaface/data/FDDB/img_list.txt",
    "content": "2002/08/11/big/img_591\n2002/08/26/big/img_265\n2002/07/19/big/img_423\n2002/08/24/big/img_490\n2002/08/31/big/img_17676\n2002/07/31/big/img_228\n2002/07/24/big/img_402\n2002/08/04/big/img_769\n2002/07/19/big/img_581\n2002/08/13/big/img_723\n2002/08/12/big/img_821\n2003/01/17/big/img_610\n2002/08/13/big/img_1116\n2002/08/28/big/img_19238\n2002/08/21/big/img_660\n2002/08/14/big/img_607\n2002/08/05/big/img_3708\n2002/08/19/big/img_511\n2002/08/07/big/img_1316\n2002/07/25/big/img_1047\n2002/07/23/big/img_474\n2002/07/27/big/img_970\n2002/09/02/big/img_15752\n2002/09/01/big/img_16378\n2002/09/01/big/img_16189\n2002/08/26/big/img_276\n2002/07/24/big/img_518\n2002/08/14/big/img_1027\n2002/08/24/big/img_733\n2002/08/15/big/img_249\n2003/01/15/big/img_1371\n2002/08/07/big/img_1348\n2003/01/01/big/img_331\n2002/08/23/big/img_536\n2002/07/30/big/img_224\n2002/08/10/big/img_763\n2002/08/21/big/img_293\n2002/08/15/big/img_1211\n2002/08/15/big/img_1194\n2003/01/15/big/img_390\n2002/08/06/big/img_2893\n2002/08/17/big/img_691\n2002/08/07/big/img_1695\n2002/08/16/big/img_829\n2002/07/25/big/img_201\n2002/08/23/big/img_36\n2003/01/15/big/img_763\n2003/01/15/big/img_637\n2002/08/22/big/img_592\n2002/07/25/big/img_817\n2003/01/15/big/img_1219\n2002/08/05/big/img_3508\n2002/08/15/big/img_1108\n2002/07/19/big/img_488\n2003/01/16/big/img_704\n2003/01/13/big/img_1087\n2002/08/10/big/img_670\n2002/07/24/big/img_104\n2002/08/27/big/img_19823\n2002/09/01/big/img_16229\n2003/01/13/big/img_846\n2002/08/04/big/img_412\n2002/07/22/big/img_554\n2002/08/12/big/img_331\n2002/08/02/big/img_533\n2002/08/12/big/img_259\n2002/08/18/big/img_328\n2003/01/14/big/img_630\n2002/08/05/big/img_3541\n2002/08/06/big/img_2390\n2002/08/20/big/img_150\n2002/08/02/big/img_1231\n2002/08/16/big/img_710\n2002/08/19/big/img_591\n2002/07/22/big/img_725\n2002/07/24/big/img_820\n2003/01/13/big/img_568\n2002/08/22/big/img_853\n2002/08/09/big/img_648\n2002/08/23/big/img_528\n2003/01/14/big/img_888\n2002/08/30/big/img_18201\n2002/08/13/big/img_965\n2003/01/14/big/img_660\n2002/07/19/big/img_517\n2003/01/14/big/img_406\n2002/08/30/big/img_18433\n2002/08/07/big/img_1630\n2002/08/06/big/img_2717\n2002/08/21/big/img_470\n2002/07/23/big/img_633\n2002/08/20/big/img_915\n2002/08/16/big/img_893\n2002/07/29/big/img_644\n2002/08/15/big/img_529\n2002/08/16/big/img_668\n2002/08/07/big/img_1871\n2002/07/25/big/img_192\n2002/07/31/big/img_961\n2002/08/19/big/img_738\n2002/07/31/big/img_382\n2002/08/19/big/img_298\n2003/01/17/big/img_608\n2002/08/21/big/img_514\n2002/07/23/big/img_183\n2003/01/17/big/img_536\n2002/07/24/big/img_478\n2002/08/06/big/img_2997\n2002/09/02/big/img_15380\n2002/08/07/big/img_1153\n2002/07/31/big/img_967\n2002/07/31/big/img_711\n2002/08/26/big/img_664\n2003/01/01/big/img_326\n2002/08/24/big/img_775\n2002/08/08/big/img_961\n2002/08/16/big/img_77\n2002/08/12/big/img_296\n2002/07/22/big/img_905\n2003/01/13/big/img_284\n2002/08/13/big/img_887\n2002/08/24/big/img_849\n2002/07/30/big/img_345\n2002/08/18/big/img_419\n2002/08/01/big/img_1347\n2002/08/05/big/img_3670\n2002/07/21/big/img_479\n2002/08/08/big/img_913\n2002/09/02/big/img_15828\n2002/08/30/big/img_18194\n2002/08/08/big/img_471\n2002/08/22/big/img_734\n2002/08/09/big/img_586\n2002/08/09/big/img_454\n2002/07/29/big/img_47\n2002/07/19/big/img_381\n2002/07/29/big/img_733\n2002/08/20/big/img_327\n2002/07/21/big/img_96\n2002/08/06/big/img_2680\n2002/07/25/big/img_919\n2002/07/21/big/img_158\n2002/07/22/big/img_801\n2002/07/22/big/img_567\n2002/07/24/big/img_804\n2002/07/24/bi
g/img_690\n2003/01/15/big/img_576\n2002/08/14/big/img_335\n2003/01/13/big/img_390\n2002/08/11/big/img_258\n2002/07/23/big/img_917\n2002/08/15/big/img_525\n2003/01/15/big/img_505\n2002/07/30/big/img_886\n2003/01/16/big/img_640\n2003/01/14/big/img_642\n2003/01/17/big/img_844\n2002/08/04/big/img_571\n2002/08/29/big/img_18702\n2003/01/15/big/img_240\n2002/07/29/big/img_553\n2002/08/10/big/img_354\n2002/08/18/big/img_17\n2003/01/15/big/img_782\n2002/07/27/big/img_382\n2002/08/14/big/img_970\n2003/01/16/big/img_70\n2003/01/16/big/img_625\n2002/08/18/big/img_341\n2002/08/26/big/img_188\n2002/08/09/big/img_405\n2002/08/02/big/img_37\n2002/08/13/big/img_748\n2002/07/22/big/img_399\n2002/07/25/big/img_844\n2002/08/12/big/img_340\n2003/01/13/big/img_815\n2002/08/26/big/img_5\n2002/08/10/big/img_158\n2002/08/18/big/img_95\n2002/07/29/big/img_1297\n2003/01/13/big/img_508\n2002/09/01/big/img_16680\n2003/01/16/big/img_338\n2002/08/13/big/img_517\n2002/07/22/big/img_626\n2002/08/06/big/img_3024\n2002/07/26/big/img_499\n2003/01/13/big/img_387\n2002/08/31/big/img_18025\n2002/08/13/big/img_520\n2003/01/16/big/img_576\n2002/07/26/big/img_121\n2002/08/25/big/img_703\n2002/08/26/big/img_615\n2002/08/17/big/img_434\n2002/08/02/big/img_677\n2002/08/18/big/img_276\n2002/08/05/big/img_3672\n2002/07/26/big/img_700\n2002/07/31/big/img_277\n2003/01/14/big/img_220\n2002/08/23/big/img_232\n2002/08/31/big/img_17422\n2002/07/22/big/img_508\n2002/08/13/big/img_681\n2003/01/15/big/img_638\n2002/08/30/big/img_18408\n2003/01/14/big/img_533\n2003/01/17/big/img_12\n2002/08/28/big/img_19388\n2002/08/08/big/img_133\n2002/07/26/big/img_885\n2002/08/19/big/img_387\n2002/08/27/big/img_19976\n2002/08/26/big/img_118\n2002/08/28/big/img_19146\n2002/08/05/big/img_3259\n2002/08/15/big/img_536\n2002/07/22/big/img_279\n2002/07/22/big/img_9\n2002/08/13/big/img_301\n2002/08/15/big/img_974\n2002/08/06/big/img_2355\n2002/08/01/big/img_1526\n2002/08/03/big/img_417\n2002/08/04/big/img_407\n2002/08/15/big/img_1029\n2002/07/29/big/img_700\n2002/08/01/big/img_1463\n2002/08/31/big/img_17365\n2002/07/28/big/img_223\n2002/07/19/big/img_827\n2002/07/27/big/img_531\n2002/07/19/big/img_845\n2002/08/20/big/img_382\n2002/07/31/big/img_268\n2002/08/27/big/img_19705\n2002/08/02/big/img_830\n2002/08/23/big/img_250\n2002/07/20/big/img_777\n2002/08/21/big/img_879\n2002/08/26/big/img_20146\n2002/08/23/big/img_789\n2002/08/06/big/img_2683\n2002/08/25/big/img_576\n2002/08/09/big/img_498\n2002/08/08/big/img_384\n2002/08/26/big/img_592\n2002/07/29/big/img_1470\n2002/08/21/big/img_452\n2002/08/30/big/img_18395\n2002/08/15/big/img_215\n2002/07/21/big/img_643\n2002/07/22/big/img_209\n2003/01/17/big/img_346\n2002/08/25/big/img_658\n2002/08/21/big/img_221\n2002/08/14/big/img_60\n2003/01/17/big/img_885\n2003/01/16/big/img_482\n2002/08/19/big/img_593\n2002/08/08/big/img_233\n2002/07/30/big/img_458\n2002/07/23/big/img_384\n2003/01/15/big/img_670\n2003/01/15/big/img_267\n2002/08/26/big/img_540\n2002/07/29/big/img_552\n2002/07/30/big/img_997\n2003/01/17/big/img_377\n2002/08/21/big/img_265\n2002/08/09/big/img_561\n2002/07/31/big/img_945\n2002/09/02/big/img_15252\n2002/08/11/big/img_276\n2002/07/22/big/img_491\n2002/07/26/big/img_517\n2002/08/14/big/img_726\n2002/08/08/big/img_46\n2002/08/28/big/img_19458\n2002/08/06/big/img_2935\n2002/07/29/big/img_1392\n2002/08/13/big/img_776\n2002/08/24/big/img_616\n2002/08/14/big/img_1065\n2002/07/29/big/img_889\n2002/08/18/big/img_188\n2002/08/07/big/img_1453\n2002/08/02/big/img_760\n2002/07/28/big/img_416\n2002/08/07/big/img_1393\n2002/08/
26/big/img_292\n2002/08/26/big/img_301\n2003/01/13/big/img_195\n2002/07/26/big/img_532\n2002/08/20/big/img_550\n2002/08/05/big/img_3658\n2002/08/26/big/img_738\n2002/09/02/big/img_15750\n2003/01/17/big/img_451\n2002/07/23/big/img_339\n2002/08/16/big/img_637\n2002/08/14/big/img_748\n2002/08/06/big/img_2739\n2002/07/25/big/img_482\n2002/08/19/big/img_191\n2002/08/26/big/img_537\n2003/01/15/big/img_716\n2003/01/15/big/img_767\n2002/08/02/big/img_452\n2002/08/08/big/img_1011\n2002/08/10/big/img_144\n2003/01/14/big/img_122\n2002/07/24/big/img_586\n2002/07/24/big/img_762\n2002/08/20/big/img_369\n2002/07/30/big/img_146\n2002/08/23/big/img_396\n2003/01/15/big/img_200\n2002/08/15/big/img_1183\n2003/01/14/big/img_698\n2002/08/09/big/img_792\n2002/08/06/big/img_2347\n2002/07/31/big/img_911\n2002/08/26/big/img_722\n2002/08/23/big/img_621\n2002/08/05/big/img_3790\n2003/01/13/big/img_633\n2002/08/09/big/img_224\n2002/07/24/big/img_454\n2002/07/21/big/img_202\n2002/08/02/big/img_630\n2002/08/30/big/img_18315\n2002/07/19/big/img_491\n2002/09/01/big/img_16456\n2002/08/09/big/img_242\n2002/07/25/big/img_595\n2002/07/22/big/img_522\n2002/08/01/big/img_1593\n2002/07/29/big/img_336\n2002/08/15/big/img_448\n2002/08/28/big/img_19281\n2002/07/29/big/img_342\n2002/08/12/big/img_78\n2003/01/14/big/img_525\n2002/07/28/big/img_147\n2002/08/11/big/img_353\n2002/08/22/big/img_513\n2002/08/04/big/img_721\n2002/08/17/big/img_247\n2003/01/14/big/img_891\n2002/08/20/big/img_853\n2002/07/19/big/img_414\n2002/08/01/big/img_1530\n2003/01/14/big/img_924\n2002/08/22/big/img_468\n2002/08/18/big/img_354\n2002/08/30/big/img_18193\n2002/08/23/big/img_492\n2002/08/15/big/img_871\n2002/08/12/big/img_494\n2002/08/06/big/img_2470\n2002/07/23/big/img_923\n2002/08/26/big/img_155\n2002/08/08/big/img_669\n2002/07/23/big/img_404\n2002/08/28/big/img_19421\n2002/08/29/big/img_18993\n2002/08/25/big/img_416\n2003/01/17/big/img_434\n2002/07/29/big/img_1370\n2002/07/28/big/img_483\n2002/08/11/big/img_50\n2002/08/10/big/img_404\n2002/09/02/big/img_15057\n2003/01/14/big/img_911\n2002/09/01/big/img_16697\n2003/01/16/big/img_665\n2002/09/01/big/img_16708\n2002/08/22/big/img_612\n2002/08/28/big/img_19471\n2002/08/02/big/img_198\n2003/01/16/big/img_527\n2002/08/22/big/img_209\n2002/08/30/big/img_18205\n2003/01/14/big/img_114\n2003/01/14/big/img_1028\n2003/01/16/big/img_894\n2003/01/14/big/img_837\n2002/07/30/big/img_9\n2002/08/06/big/img_2821\n2002/08/04/big/img_85\n2003/01/13/big/img_884\n2002/07/22/big/img_570\n2002/08/07/big/img_1773\n2002/07/26/big/img_208\n2003/01/17/big/img_946\n2002/07/19/big/img_930\n2003/01/01/big/img_698\n2003/01/17/big/img_612\n2002/07/19/big/img_372\n2002/07/30/big/img_721\n2003/01/14/big/img_649\n2002/08/19/big/img_4\n2002/07/25/big/img_1024\n2003/01/15/big/img_601\n2002/08/30/big/img_18470\n2002/07/22/big/img_29\n2002/08/07/big/img_1686\n2002/07/20/big/img_294\n2002/08/14/big/img_800\n2002/08/19/big/img_353\n2002/08/19/big/img_350\n2002/08/05/big/img_3392\n2002/08/09/big/img_622\n2003/01/15/big/img_236\n2002/08/11/big/img_643\n2002/08/05/big/img_3458\n2002/08/12/big/img_413\n2002/08/22/big/img_415\n2002/08/13/big/img_635\n2002/08/07/big/img_1198\n2002/08/04/big/img_873\n2002/08/12/big/img_407\n2003/01/15/big/img_346\n2002/08/02/big/img_275\n2002/08/17/big/img_997\n2002/08/21/big/img_958\n2002/08/20/big/img_579\n2002/07/29/big/img_142\n2003/01/14/big/img_1115\n2002/08/16/big/img_365\n2002/07/29/big/img_1414\n2002/08/17/big/img_489\n2002/08/13/big/img_1010\n2002/07/31/big/img_276\n2002/07/25/big/img_1000\n2002/08/23/big/img_
524\n2002/08/28/big/img_19147\n2003/01/13/big/img_433\n2002/08/20/big/img_205\n2003/01/01/big/img_458\n2002/07/29/big/img_1449\n2003/01/16/big/img_696\n2002/08/28/big/img_19296\n2002/08/29/big/img_18688\n2002/08/21/big/img_767\n2002/08/20/big/img_532\n2002/08/26/big/img_187\n2002/07/26/big/img_183\n2002/07/27/big/img_890\n2003/01/13/big/img_576\n2002/07/30/big/img_15\n2002/07/31/big/img_889\n2002/08/31/big/img_17759\n2003/01/14/big/img_1114\n2002/07/19/big/img_445\n2002/08/03/big/img_593\n2002/07/24/big/img_750\n2002/07/30/big/img_133\n2002/08/25/big/img_671\n2002/07/20/big/img_351\n2002/08/31/big/img_17276\n2002/08/05/big/img_3231\n2002/09/02/big/img_15882\n2002/08/14/big/img_115\n2002/08/02/big/img_1148\n2002/07/25/big/img_936\n2002/07/31/big/img_639\n2002/08/04/big/img_427\n2002/08/22/big/img_843\n2003/01/17/big/img_17\n2003/01/13/big/img_690\n2002/08/13/big/img_472\n2002/08/09/big/img_425\n2002/08/05/big/img_3450\n2003/01/17/big/img_439\n2002/08/13/big/img_539\n2002/07/28/big/img_35\n2002/08/16/big/img_241\n2002/08/06/big/img_2898\n2003/01/16/big/img_429\n2002/08/05/big/img_3817\n2002/08/27/big/img_19919\n2002/07/19/big/img_422\n2002/08/15/big/img_560\n2002/07/23/big/img_750\n2002/07/30/big/img_353\n2002/08/05/big/img_43\n2002/08/23/big/img_305\n2002/08/01/big/img_2137\n2002/08/30/big/img_18097\n2002/08/01/big/img_1389\n2002/08/02/big/img_308\n2003/01/14/big/img_652\n2002/08/01/big/img_1798\n2003/01/14/big/img_732\n2003/01/16/big/img_294\n2002/08/26/big/img_213\n2002/07/24/big/img_842\n2003/01/13/big/img_630\n2003/01/13/big/img_634\n2002/08/06/big/img_2285\n2002/08/01/big/img_2162\n2002/08/30/big/img_18134\n2002/08/02/big/img_1045\n2002/08/01/big/img_2143\n2002/07/25/big/img_135\n2002/07/20/big/img_645\n2002/08/05/big/img_3666\n2002/08/14/big/img_523\n2002/08/04/big/img_425\n2003/01/14/big/img_137\n2003/01/01/big/img_176\n2002/08/15/big/img_505\n2002/08/24/big/img_386\n2002/08/05/big/img_3187\n2002/08/15/big/img_419\n2003/01/13/big/img_520\n2002/08/04/big/img_444\n2002/08/26/big/img_483\n2002/08/05/big/img_3449\n2002/08/30/big/img_18409\n2002/08/28/big/img_19455\n2002/08/27/big/img_20090\n2002/07/23/big/img_625\n2002/08/24/big/img_205\n2002/08/08/big/img_938\n2003/01/13/big/img_527\n2002/08/07/big/img_1712\n2002/07/24/big/img_801\n2002/08/09/big/img_579\n2003/01/14/big/img_41\n2003/01/15/big/img_1130\n2002/07/21/big/img_672\n2002/08/07/big/img_1590\n2003/01/01/big/img_532\n2002/08/02/big/img_529\n2002/08/05/big/img_3591\n2002/08/23/big/img_5\n2003/01/14/big/img_882\n2002/08/28/big/img_19234\n2002/07/24/big/img_398\n2003/01/14/big/img_592\n2002/08/22/big/img_548\n2002/08/12/big/img_761\n2003/01/16/big/img_497\n2002/08/18/big/img_133\n2002/08/08/big/img_874\n2002/07/19/big/img_247\n2002/08/15/big/img_170\n2002/08/27/big/img_19679\n2002/08/20/big/img_246\n2002/08/24/big/img_358\n2002/07/29/big/img_599\n2002/08/01/big/img_1555\n2002/07/30/big/img_491\n2002/07/30/big/img_371\n2003/01/16/big/img_682\n2002/07/25/big/img_619\n2003/01/15/big/img_587\n2002/08/02/big/img_1212\n2002/08/01/big/img_2152\n2002/07/25/big/img_668\n2003/01/16/big/img_574\n2002/08/28/big/img_19464\n2002/08/11/big/img_536\n2002/07/24/big/img_201\n2002/08/05/big/img_3488\n2002/07/25/big/img_887\n2002/07/22/big/img_789\n2002/07/30/big/img_432\n2002/08/16/big/img_166\n2002/09/01/big/img_16333\n2002/07/26/big/img_1010\n2002/07/21/big/img_793\n2002/07/22/big/img_720\n2002/07/31/big/img_337\n2002/07/27/big/img_185\n2002/08/23/big/img_440\n2002/07/31/big/img_801\n2002/07/25/big/img_478\n2003/01/14/big/img_171\n2002/08/07/big/img_
1054\n2002/09/02/big/img_15659\n2002/07/29/big/img_1348\n2002/08/09/big/img_337\n2002/08/26/big/img_684\n2002/07/31/big/img_537\n2002/08/15/big/img_808\n2003/01/13/big/img_740\n2002/08/07/big/img_1667\n2002/08/03/big/img_404\n2002/08/06/big/img_2520\n2002/07/19/big/img_230\n2002/07/19/big/img_356\n2003/01/16/big/img_627\n2002/08/04/big/img_474\n2002/07/29/big/img_833\n2002/07/25/big/img_176\n2002/08/01/big/img_1684\n2002/08/21/big/img_643\n2002/08/27/big/img_19673\n2002/08/02/big/img_838\n2002/08/06/big/img_2378\n2003/01/15/big/img_48\n2002/07/30/big/img_470\n2002/08/15/big/img_963\n2002/08/24/big/img_444\n2002/08/16/big/img_662\n2002/08/15/big/img_1209\n2002/07/24/big/img_25\n2002/08/06/big/img_2740\n2002/07/29/big/img_996\n2002/08/31/big/img_18074\n2002/08/04/big/img_343\n2003/01/17/big/img_509\n2003/01/13/big/img_726\n2002/08/07/big/img_1466\n2002/07/26/big/img_307\n2002/08/10/big/img_598\n2002/08/13/big/img_890\n2002/08/14/big/img_997\n2002/07/19/big/img_392\n2002/08/02/big/img_475\n2002/08/29/big/img_19038\n2002/07/29/big/img_538\n2002/07/29/big/img_502\n2002/08/02/big/img_364\n2002/08/31/big/img_17353\n2002/08/08/big/img_539\n2002/08/01/big/img_1449\n2002/07/22/big/img_363\n2002/08/02/big/img_90\n2002/09/01/big/img_16867\n2002/08/05/big/img_3371\n2002/07/30/big/img_342\n2002/08/07/big/img_1363\n2002/08/22/big/img_790\n2003/01/15/big/img_404\n2002/08/05/big/img_3447\n2002/09/01/big/img_16167\n2003/01/13/big/img_840\n2002/08/22/big/img_1001\n2002/08/09/big/img_431\n2002/07/27/big/img_618\n2002/07/31/big/img_741\n2002/07/30/big/img_964\n2002/07/25/big/img_86\n2002/07/29/big/img_275\n2002/08/21/big/img_921\n2002/07/26/big/img_892\n2002/08/21/big/img_663\n2003/01/13/big/img_567\n2003/01/14/big/img_719\n2002/07/28/big/img_251\n2003/01/15/big/img_1123\n2002/07/29/big/img_260\n2002/08/24/big/img_337\n2002/08/01/big/img_1914\n2002/08/13/big/img_373\n2003/01/15/big/img_589\n2002/08/13/big/img_906\n2002/07/26/big/img_270\n2002/08/26/big/img_313\n2002/08/25/big/img_694\n2003/01/01/big/img_327\n2002/07/23/big/img_261\n2002/08/26/big/img_642\n2002/07/29/big/img_918\n2002/07/23/big/img_455\n2002/07/24/big/img_612\n2002/07/23/big/img_534\n2002/07/19/big/img_534\n2002/07/19/big/img_726\n2002/08/01/big/img_2146\n2002/08/02/big/img_543\n2003/01/16/big/img_777\n2002/07/30/big/img_484\n2002/08/13/big/img_1161\n2002/07/21/big/img_390\n2002/08/06/big/img_2288\n2002/08/21/big/img_677\n2002/08/13/big/img_747\n2002/08/15/big/img_1248\n2002/07/31/big/img_416\n2002/09/02/big/img_15259\n2002/08/16/big/img_781\n2002/08/24/big/img_754\n2002/07/24/big/img_803\n2002/08/20/big/img_609\n2002/08/28/big/img_19571\n2002/09/01/big/img_16140\n2002/08/26/big/img_769\n2002/07/20/big/img_588\n2002/08/02/big/img_898\n2002/07/21/big/img_466\n2002/08/14/big/img_1046\n2002/07/25/big/img_212\n2002/08/26/big/img_353\n2002/08/19/big/img_810\n2002/08/31/big/img_17824\n2002/08/12/big/img_631\n2002/07/19/big/img_828\n2002/07/24/big/img_130\n2002/08/25/big/img_580\n2002/07/31/big/img_699\n2002/07/23/big/img_808\n2002/07/31/big/img_377\n2003/01/16/big/img_570\n2002/09/01/big/img_16254\n2002/07/21/big/img_471\n2002/08/01/big/img_1548\n2002/08/18/big/img_252\n2002/08/19/big/img_576\n2002/08/20/big/img_464\n2002/07/27/big/img_735\n2002/08/21/big/img_589\n2003/01/15/big/img_1192\n2002/08/09/big/img_302\n2002/07/31/big/img_594\n2002/08/23/big/img_19\n2002/08/29/big/img_18819\n2002/08/19/big/img_293\n2002/07/30/big/img_331\n2002/08/23/big/img_607\n2002/07/30/big/img_363\n2002/08/16/big/img_766\n2003/01/13/big/img_481\n2002/08/06/big/img_2515\n2
002/09/02/big/img_15913\n2002/09/02/big/img_15827\n2002/09/02/big/img_15053\n2002/08/07/big/img_1576\n2002/07/23/big/img_268\n2002/08/21/big/img_152\n2003/01/15/big/img_578\n2002/07/21/big/img_589\n2002/07/20/big/img_548\n2002/08/27/big/img_19693\n2002/08/31/big/img_17252\n2002/07/31/big/img_138\n2002/07/23/big/img_372\n2002/08/16/big/img_695\n2002/07/27/big/img_287\n2002/08/15/big/img_315\n2002/08/10/big/img_361\n2002/07/29/big/img_899\n2002/08/13/big/img_771\n2002/08/21/big/img_92\n2003/01/15/big/img_425\n2003/01/16/big/img_450\n2002/09/01/big/img_16942\n2002/08/02/big/img_51\n2002/09/02/big/img_15379\n2002/08/24/big/img_147\n2002/08/30/big/img_18122\n2002/07/26/big/img_950\n2002/08/07/big/img_1400\n2002/08/17/big/img_468\n2002/08/15/big/img_470\n2002/07/30/big/img_318\n2002/07/22/big/img_644\n2002/08/27/big/img_19732\n2002/07/23/big/img_601\n2002/08/26/big/img_398\n2002/08/21/big/img_428\n2002/08/06/big/img_2119\n2002/08/29/big/img_19103\n2003/01/14/big/img_933\n2002/08/11/big/img_674\n2002/08/28/big/img_19420\n2002/08/03/big/img_418\n2002/08/17/big/img_312\n2002/07/25/big/img_1044\n2003/01/17/big/img_671\n2002/08/30/big/img_18297\n2002/07/25/big/img_755\n2002/07/23/big/img_471\n2002/08/21/big/img_39\n2002/07/26/big/img_699\n2003/01/14/big/img_33\n2002/07/31/big/img_411\n2002/08/16/big/img_645\n2003/01/17/big/img_116\n2002/09/02/big/img_15903\n2002/08/20/big/img_120\n2002/08/22/big/img_176\n2002/07/29/big/img_1316\n2002/08/27/big/img_19914\n2002/07/22/big/img_719\n2002/08/28/big/img_19239\n2003/01/13/big/img_385\n2002/08/08/big/img_525\n2002/07/19/big/img_782\n2002/08/13/big/img_843\n2002/07/30/big/img_107\n2002/08/11/big/img_752\n2002/07/29/big/img_383\n2002/08/26/big/img_249\n2002/08/29/big/img_18860\n2002/07/30/big/img_70\n2002/07/26/big/img_194\n2002/08/15/big/img_530\n2002/08/08/big/img_816\n2002/07/31/big/img_286\n2003/01/13/big/img_294\n2002/07/31/big/img_251\n2002/07/24/big/img_13\n2002/08/31/big/img_17938\n2002/07/22/big/img_642\n2003/01/14/big/img_728\n2002/08/18/big/img_47\n2002/08/22/big/img_306\n2002/08/20/big/img_348\n2002/08/15/big/img_764\n2002/08/08/big/img_163\n2002/07/23/big/img_531\n2002/07/23/big/img_467\n2003/01/16/big/img_743\n2003/01/13/big/img_535\n2002/08/02/big/img_523\n2002/08/22/big/img_120\n2002/08/11/big/img_496\n2002/08/29/big/img_19075\n2002/08/08/big/img_465\n2002/08/09/big/img_790\n2002/08/19/big/img_588\n2002/08/23/big/img_407\n2003/01/17/big/img_435\n2002/08/24/big/img_398\n2002/08/27/big/img_19899\n2003/01/15/big/img_335\n2002/08/13/big/img_493\n2002/09/02/big/img_15460\n2002/07/31/big/img_470\n2002/08/05/big/img_3550\n2002/07/28/big/img_123\n2002/08/01/big/img_1498\n2002/08/04/big/img_504\n2003/01/17/big/img_427\n2002/08/27/big/img_19708\n2002/07/27/big/img_861\n2002/07/25/big/img_685\n2002/07/31/big/img_207\n2003/01/14/big/img_745\n2002/08/31/big/img_17756\n2002/08/24/big/img_288\n2002/08/18/big/img_181\n2002/08/10/big/img_520\n2002/08/25/big/img_705\n2002/08/23/big/img_226\n2002/08/04/big/img_727\n2002/07/24/big/img_625\n2002/08/28/big/img_19157\n2002/08/23/big/img_586\n2002/07/31/big/img_232\n2003/01/13/big/img_240\n2003/01/14/big/img_321\n2003/01/15/big/img_533\n2002/07/23/big/img_480\n2002/07/24/big/img_371\n2002/08/21/big/img_702\n2002/08/31/big/img_17075\n2002/09/02/big/img_15278\n2002/07/29/big/img_246\n2003/01/15/big/img_829\n2003/01/15/big/img_1213\n2003/01/16/big/img_441\n2002/08/14/big/img_921\n2002/07/23/big/img_425\n2002/08/15/big/img_296\n2002/07/19/big/img_135\n2002/07/26/big/img_402\n2003/01/17/big/img_88\n2002/08/20/big/img_872\n20
02/08/13/big/img_1110\n2003/01/16/big/img_1040\n2002/07/23/big/img_9\n2002/08/13/big/img_700\n2002/08/16/big/img_371\n2002/08/27/big/img_19966\n2003/01/17/big/img_391\n2002/08/18/big/img_426\n2002/08/01/big/img_1618\n2002/07/21/big/img_754\n2003/01/14/big/img_1101\n2003/01/16/big/img_1022\n2002/07/22/big/img_275\n2002/08/24/big/img_86\n2002/08/17/big/img_582\n2003/01/15/big/img_765\n2003/01/17/big/img_449\n2002/07/28/big/img_265\n2003/01/13/big/img_552\n2002/07/28/big/img_115\n2003/01/16/big/img_56\n2002/08/02/big/img_1232\n2003/01/17/big/img_925\n2002/07/22/big/img_445\n2002/07/25/big/img_957\n2002/07/20/big/img_589\n2002/08/31/big/img_17107\n2002/07/29/big/img_483\n2002/08/14/big/img_1063\n2002/08/07/big/img_1545\n2002/08/14/big/img_680\n2002/09/01/big/img_16694\n2002/08/14/big/img_257\n2002/08/11/big/img_726\n2002/07/26/big/img_681\n2002/07/25/big/img_481\n2003/01/14/big/img_737\n2002/08/28/big/img_19480\n2003/01/16/big/img_362\n2002/08/27/big/img_19865\n2003/01/01/big/img_547\n2002/09/02/big/img_15074\n2002/08/01/big/img_1453\n2002/08/22/big/img_594\n2002/08/28/big/img_19263\n2002/08/13/big/img_478\n2002/07/29/big/img_1358\n2003/01/14/big/img_1022\n2002/08/16/big/img_450\n2002/08/02/big/img_159\n2002/07/26/big/img_781\n2003/01/13/big/img_601\n2002/08/20/big/img_407\n2002/08/15/big/img_468\n2002/08/31/big/img_17902\n2002/08/16/big/img_81\n2002/07/25/big/img_987\n2002/07/25/big/img_500\n2002/08/02/big/img_31\n2002/08/18/big/img_538\n2002/08/08/big/img_54\n2002/07/23/big/img_686\n2002/07/24/big/img_836\n2003/01/17/big/img_734\n2002/08/16/big/img_1055\n2003/01/16/big/img_521\n2002/07/25/big/img_612\n2002/08/22/big/img_778\n2002/08/03/big/img_251\n2002/08/12/big/img_436\n2002/08/23/big/img_705\n2002/07/28/big/img_243\n2002/07/25/big/img_1029\n2002/08/20/big/img_287\n2002/08/29/big/img_18739\n2002/08/05/big/img_3272\n2002/07/27/big/img_214\n2003/01/14/big/img_5\n2002/08/01/big/img_1380\n2002/08/29/big/img_19097\n2002/07/30/big/img_486\n2002/08/29/big/img_18707\n2002/08/10/big/img_559\n2002/08/15/big/img_365\n2002/08/09/big/img_525\n2002/08/10/big/img_689\n2002/07/25/big/img_502\n2002/08/03/big/img_667\n2002/08/10/big/img_855\n2002/08/10/big/img_706\n2002/08/18/big/img_603\n2003/01/16/big/img_1055\n2002/08/31/big/img_17890\n2002/08/15/big/img_761\n2003/01/15/big/img_489\n2002/08/26/big/img_351\n2002/08/01/big/img_1772\n2002/08/31/big/img_17729\n2002/07/25/big/img_609\n2003/01/13/big/img_539\n2002/07/27/big/img_686\n2002/07/31/big/img_311\n2002/08/22/big/img_799\n2003/01/16/big/img_936\n2002/08/31/big/img_17813\n2002/08/04/big/img_862\n2002/08/09/big/img_332\n2002/07/20/big/img_148\n2002/08/12/big/img_426\n2002/07/24/big/img_69\n2002/07/27/big/img_685\n2002/08/02/big/img_480\n2002/08/26/big/img_154\n2002/07/24/big/img_598\n2002/08/01/big/img_1881\n2002/08/20/big/img_667\n2003/01/14/big/img_495\n2002/07/21/big/img_744\n2002/07/30/big/img_150\n2002/07/23/big/img_924\n2002/08/08/big/img_272\n2002/07/23/big/img_310\n2002/07/25/big/img_1011\n2002/09/02/big/img_15725\n2002/07/19/big/img_814\n2002/08/20/big/img_936\n2002/07/25/big/img_85\n2002/08/24/big/img_662\n2002/08/09/big/img_495\n2003/01/15/big/img_196\n2002/08/16/big/img_707\n2002/08/28/big/img_19370\n2002/08/06/big/img_2366\n2002/08/06/big/img_3012\n2002/08/01/big/img_1452\n2002/07/31/big/img_742\n2002/07/27/big/img_914\n2003/01/13/big/img_290\n2002/07/31/big/img_288\n2002/08/02/big/img_171\n2002/08/22/big/img_191\n2002/07/27/big/img_1066\n2002/08/12/big/img_383\n2003/01/17/big/img_1018\n2002/08/01/big/img_1785\n2002/08/11/big/img_390\n2002/08
/27/big/img_20037\n2002/08/12/big/img_38\n2003/01/15/big/img_103\n2002/08/26/big/img_31\n2002/08/18/big/img_660\n2002/07/22/big/img_694\n2002/08/15/big/img_24\n2002/07/27/big/img_1077\n2002/08/01/big/img_1943\n2002/07/22/big/img_292\n2002/09/01/big/img_16857\n2002/07/22/big/img_892\n2003/01/14/big/img_46\n2002/08/09/big/img_469\n2002/08/09/big/img_414\n2003/01/16/big/img_40\n2002/08/28/big/img_19231\n2002/07/27/big/img_978\n2002/07/23/big/img_475\n2002/07/25/big/img_92\n2002/08/09/big/img_799\n2002/07/25/big/img_491\n2002/08/03/big/img_654\n2003/01/15/big/img_687\n2002/08/11/big/img_478\n2002/08/07/big/img_1664\n2002/08/20/big/img_362\n2002/08/01/big/img_1298\n2003/01/13/big/img_500\n2002/08/06/big/img_2896\n2002/08/30/big/img_18529\n2002/08/16/big/img_1020\n2002/07/29/big/img_892\n2002/08/29/big/img_18726\n2002/07/21/big/img_453\n2002/08/17/big/img_437\n2002/07/19/big/img_665\n2002/07/22/big/img_440\n2002/07/19/big/img_582\n2002/07/21/big/img_233\n2003/01/01/big/img_82\n2002/07/25/big/img_341\n2002/07/29/big/img_864\n2002/08/02/big/img_276\n2002/08/29/big/img_18654\n2002/07/27/big/img_1024\n2002/08/19/big/img_373\n2003/01/15/big/img_241\n2002/07/25/big/img_84\n2002/08/13/big/img_834\n2002/08/10/big/img_511\n2002/08/01/big/img_1627\n2002/08/08/big/img_607\n2002/08/06/big/img_2083\n2002/08/01/big/img_1486\n2002/08/08/big/img_700\n2002/08/01/big/img_1954\n2002/08/21/big/img_54\n2002/07/30/big/img_847\n2002/08/28/big/img_19169\n2002/07/21/big/img_549\n2002/08/03/big/img_693\n2002/07/31/big/img_1002\n2003/01/14/big/img_1035\n2003/01/16/big/img_622\n2002/07/30/big/img_1201\n2002/08/10/big/img_444\n2002/07/31/big/img_374\n2002/08/21/big/img_301\n2002/08/13/big/img_1095\n2003/01/13/big/img_288\n2002/07/25/big/img_232\n2003/01/13/big/img_967\n2002/08/26/big/img_360\n2002/08/05/big/img_67\n2002/08/29/big/img_18969\n2002/07/28/big/img_16\n2002/08/16/big/img_515\n2002/07/20/big/img_708\n2002/08/18/big/img_178\n2003/01/15/big/img_509\n2002/07/25/big/img_430\n2002/08/21/big/img_738\n2002/08/16/big/img_886\n2002/09/02/big/img_15605\n2002/09/01/big/img_16242\n2002/08/24/big/img_711\n2002/07/25/big/img_90\n2002/08/09/big/img_491\n2002/07/30/big/img_534\n2003/01/13/big/img_474\n2002/08/25/big/img_510\n2002/08/15/big/img_555\n2002/08/02/big/img_775\n2002/07/23/big/img_975\n2002/08/19/big/img_229\n2003/01/17/big/img_860\n2003/01/02/big/img_10\n2002/07/23/big/img_542\n2002/08/06/big/img_2535\n2002/07/22/big/img_37\n2002/08/06/big/img_2342\n2002/08/25/big/img_515\n2002/08/25/big/img_336\n2002/08/18/big/img_837\n2002/08/21/big/img_616\n2003/01/17/big/img_24\n2002/07/26/big/img_936\n2002/08/14/big/img_896\n2002/07/29/big/img_465\n2002/07/31/big/img_543\n2002/08/01/big/img_1411\n2002/08/02/big/img_423\n2002/08/21/big/img_44\n2002/07/31/big/img_11\n2003/01/15/big/img_628\n2003/01/15/big/img_605\n2002/07/30/big/img_571\n2002/07/23/big/img_428\n2002/08/15/big/img_942\n2002/07/26/big/img_531\n2003/01/16/big/img_59\n2002/08/02/big/img_410\n2002/07/31/big/img_230\n2002/08/19/big/img_806\n2003/01/14/big/img_462\n2002/08/16/big/img_370\n2002/08/13/big/img_380\n2002/08/16/big/img_932\n2002/07/19/big/img_393\n2002/08/20/big/img_764\n2002/08/15/big/img_616\n2002/07/26/big/img_267\n2002/07/27/big/img_1069\n2002/08/14/big/img_1041\n2003/01/13/big/img_594\n2002/09/01/big/img_16845\n2002/08/09/big/img_229\n2003/01/16/big/img_639\n2002/08/19/big/img_398\n2002/08/18/big/img_978\n2002/08/24/big/img_296\n2002/07/29/big/img_415\n2002/07/30/big/img_923\n2002/08/18/big/img_575\n2002/08/22/big/img_182\n2002/07/25/big/img_806\n2002/07/22
/big/img_49\n2002/07/29/big/img_989\n2003/01/17/big/img_789\n2003/01/15/big/img_503\n2002/09/01/big/img_16062\n2003/01/17/big/img_794\n2002/08/15/big/img_564\n2003/01/15/big/img_222\n2002/08/01/big/img_1656\n2003/01/13/big/img_432\n2002/07/19/big/img_426\n2002/08/17/big/img_244\n2002/08/13/big/img_805\n2002/09/02/big/img_15067\n2002/08/11/big/img_58\n2002/08/22/big/img_636\n2002/07/22/big/img_416\n2002/08/13/big/img_836\n2002/08/26/big/img_363\n2002/07/30/big/img_917\n2003/01/14/big/img_206\n2002/08/12/big/img_311\n2002/08/31/big/img_17623\n2002/07/29/big/img_661\n2003/01/13/big/img_417\n2002/08/02/big/img_463\n2002/08/02/big/img_669\n2002/08/26/big/img_670\n2002/08/02/big/img_375\n2002/07/19/big/img_209\n2002/08/08/big/img_115\n2002/08/21/big/img_399\n2002/08/20/big/img_911\n2002/08/07/big/img_1212\n2002/08/20/big/img_578\n2002/08/22/big/img_554\n2002/08/21/big/img_484\n2002/07/25/big/img_450\n2002/08/03/big/img_542\n2002/08/15/big/img_561\n2002/07/23/big/img_360\n2002/08/30/big/img_18137\n2002/07/25/big/img_250\n2002/08/03/big/img_647\n2002/08/20/big/img_375\n2002/08/14/big/img_387\n2002/09/01/big/img_16990\n2002/08/28/big/img_19341\n2003/01/15/big/img_239\n2002/08/20/big/img_528\n2002/08/12/big/img_130\n2002/09/02/big/img_15108\n2003/01/15/big/img_372\n2002/08/16/big/img_678\n2002/08/04/big/img_623\n2002/07/23/big/img_477\n2002/08/28/big/img_19590\n2003/01/17/big/img_978\n2002/09/01/big/img_16692\n2002/07/20/big/img_109\n2002/08/06/big/img_2660\n2003/01/14/big/img_464\n2002/08/09/big/img_618\n2002/07/22/big/img_722\n2002/08/25/big/img_419\n2002/08/03/big/img_314\n2002/08/25/big/img_40\n2002/07/27/big/img_430\n2002/08/10/big/img_569\n2002/08/23/big/img_398\n2002/07/23/big/img_893\n2002/08/16/big/img_261\n2002/08/06/big/img_2668\n2002/07/22/big/img_835\n2002/09/02/big/img_15093\n2003/01/16/big/img_65\n2002/08/21/big/img_448\n2003/01/14/big/img_351\n2003/01/17/big/img_133\n2002/07/28/big/img_493\n2003/01/15/big/img_640\n2002/09/01/big/img_16880\n2002/08/15/big/img_350\n2002/08/20/big/img_624\n2002/08/25/big/img_604\n2002/08/06/big/img_2200\n2002/08/23/big/img_290\n2002/08/13/big/img_1152\n2003/01/14/big/img_251\n2002/08/02/big/img_538\n2002/08/22/big/img_613\n2003/01/13/big/img_351\n2002/08/18/big/img_368\n2002/07/23/big/img_392\n2002/07/25/big/img_198\n2002/07/25/big/img_418\n2002/08/26/big/img_614\n2002/07/23/big/img_405\n2003/01/14/big/img_445\n2002/07/25/big/img_326\n2002/08/10/big/img_734\n2003/01/14/big/img_530\n2002/08/08/big/img_561\n2002/08/29/big/img_18990\n2002/08/10/big/img_576\n2002/07/29/big/img_1494\n2002/07/19/big/img_198\n2002/08/10/big/img_562\n2002/07/22/big/img_901\n2003/01/14/big/img_37\n2002/09/02/big/img_15629\n2003/01/14/big/img_58\n2002/08/01/big/img_1364\n2002/07/27/big/img_636\n2003/01/13/big/img_241\n2002/09/01/big/img_16988\n2003/01/13/big/img_560\n2002/08/09/big/img_533\n2002/07/31/big/img_249\n2003/01/17/big/img_1007\n2002/07/21/big/img_64\n2003/01/13/big/img_537\n2003/01/15/big/img_606\n2002/08/18/big/img_651\n2002/08/24/big/img_405\n2002/07/26/big/img_837\n2002/08/09/big/img_562\n2002/08/01/big/img_1983\n2002/08/03/big/img_514\n2002/07/29/big/img_314\n2002/08/12/big/img_493\n2003/01/14/big/img_121\n2003/01/14/big/img_479\n2002/08/04/big/img_410\n2002/07/22/big/img_607\n2003/01/17/big/img_417\n2002/07/20/big/img_547\n2002/08/13/big/img_396\n2002/08/31/big/img_17538\n2002/08/13/big/img_187\n2002/08/12/big/img_328\n2003/01/14/big/img_569\n2002/07/27/big/img_1081\n2002/08/14/big/img_504\n2002/08/23/big/img_785\n2002/07/26/big/img_339\n2002/08/07/big/img_1156\n20
02/08/07/big/img_1456\n2002/08/23/big/img_378\n2002/08/27/big/img_19719\n2002/07/31/big/img_39\n2002/07/31/big/img_883\n2003/01/14/big/img_676\n2002/07/29/big/img_214\n2002/07/26/big/img_669\n2002/07/25/big/img_202\n2002/08/08/big/img_259\n2003/01/17/big/img_943\n2003/01/15/big/img_512\n2002/08/05/big/img_3295\n2002/08/27/big/img_19685\n2002/08/08/big/img_277\n2002/08/30/big/img_18154\n2002/07/22/big/img_663\n2002/08/29/big/img_18914\n2002/07/31/big/img_908\n2002/08/27/big/img_19926\n2003/01/13/big/img_791\n2003/01/15/big/img_827\n2002/08/18/big/img_878\n2002/08/14/big/img_670\n2002/07/20/big/img_182\n2002/08/15/big/img_291\n2002/08/06/big/img_2600\n2002/07/23/big/img_587\n2002/08/14/big/img_577\n2003/01/15/big/img_585\n2002/07/30/big/img_310\n2002/08/03/big/img_658\n2002/08/10/big/img_157\n2002/08/19/big/img_811\n2002/07/29/big/img_1318\n2002/08/04/big/img_104\n2002/07/30/big/img_332\n2002/07/24/big/img_789\n2002/07/29/big/img_516\n2002/07/23/big/img_843\n2002/08/01/big/img_1528\n2002/08/13/big/img_798\n2002/08/07/big/img_1729\n2002/08/28/big/img_19448\n2003/01/16/big/img_95\n2002/08/12/big/img_473\n2002/07/27/big/img_269\n2003/01/16/big/img_621\n2002/07/29/big/img_772\n2002/07/24/big/img_171\n2002/07/19/big/img_429\n2002/08/07/big/img_1933\n2002/08/27/big/img_19629\n2002/08/05/big/img_3688\n2002/08/07/big/img_1691\n2002/07/23/big/img_600\n2002/07/29/big/img_666\n2002/08/25/big/img_566\n2002/08/06/big/img_2659\n2002/08/29/big/img_18929\n2002/08/16/big/img_407\n2002/08/18/big/img_774\n2002/08/19/big/img_249\n2002/08/06/big/img_2427\n2002/08/29/big/img_18899\n2002/08/01/big/img_1818\n2002/07/31/big/img_108\n2002/07/29/big/img_500\n2002/08/11/big/img_115\n2002/07/19/big/img_521\n2002/08/02/big/img_1163\n2002/07/22/big/img_62\n2002/08/13/big/img_466\n2002/08/21/big/img_956\n2002/08/23/big/img_602\n2002/08/20/big/img_858\n2002/07/25/big/img_690\n2002/07/19/big/img_130\n2002/08/04/big/img_874\n2002/07/26/big/img_489\n2002/07/22/big/img_548\n2002/08/10/big/img_191\n2002/07/25/big/img_1051\n2002/08/18/big/img_473\n2002/08/12/big/img_755\n2002/08/18/big/img_413\n2002/08/08/big/img_1044\n2002/08/17/big/img_680\n2002/08/26/big/img_235\n2002/08/20/big/img_330\n2002/08/22/big/img_344\n2002/08/09/big/img_593\n2002/07/31/big/img_1006\n2002/08/14/big/img_337\n2002/08/16/big/img_728\n2002/07/24/big/img_834\n2002/08/04/big/img_552\n2002/09/02/big/img_15213\n2002/07/25/big/img_725\n2002/08/30/big/img_18290\n2003/01/01/big/img_475\n2002/07/27/big/img_1083\n2002/08/29/big/img_18955\n2002/08/31/big/img_17232\n2002/08/08/big/img_480\n2002/08/01/big/img_1311\n2002/07/30/big/img_745\n2002/08/03/big/img_649\n2002/08/12/big/img_193\n2002/07/29/big/img_228\n2002/07/25/big/img_836\n2002/08/20/big/img_400\n2002/07/30/big/img_507\n2002/09/02/big/img_15072\n2002/07/26/big/img_658\n2002/07/28/big/img_503\n2002/08/05/big/img_3814\n2002/08/24/big/img_745\n2003/01/13/big/img_817\n2002/08/08/big/img_579\n2002/07/22/big/img_251\n2003/01/13/big/img_689\n2002/07/25/big/img_407\n2002/08/13/big/img_1050\n2002/08/14/big/img_733\n2002/07/24/big/img_82\n2003/01/17/big/img_288\n2003/01/15/big/img_475\n2002/08/14/big/img_620\n2002/08/21/big/img_167\n2002/07/19/big/img_300\n2002/07/26/big/img_219\n2002/08/01/big/img_1468\n2002/07/23/big/img_260\n2002/08/09/big/img_555\n2002/07/19/big/img_160\n2002/08/02/big/img_1060\n2003/01/14/big/img_149\n2002/08/15/big/img_346\n2002/08/24/big/img_597\n2002/08/22/big/img_502\n2002/08/30/big/img_18228\n2002/07/21/big/img_766\n2003/01/15/big/img_841\n2002/07/24/big/img_516\n2002/08/02/big/img_265\n2002/
08/15/big/img_1243\n2003/01/15/big/img_223\n2002/08/04/big/img_236\n2002/07/22/big/img_309\n2002/07/20/big/img_656\n2002/07/31/big/img_412\n2002/09/01/big/img_16462\n2003/01/16/big/img_431\n2002/07/22/big/img_793\n2002/08/15/big/img_877\n2002/07/26/big/img_282\n2002/07/25/big/img_529\n2002/08/24/big/img_613\n2003/01/17/big/img_700\n2002/08/06/big/img_2526\n2002/08/24/big/img_394\n2002/08/21/big/img_521\n2002/08/25/big/img_560\n2002/07/29/big/img_966\n2002/07/25/big/img_448\n2003/01/13/big/img_782\n2002/08/21/big/img_296\n2002/09/01/big/img_16755\n2002/08/05/big/img_3552\n2002/09/02/big/img_15823\n2003/01/14/big/img_193\n2002/07/21/big/img_159\n2002/08/02/big/img_564\n2002/08/16/big/img_300\n2002/07/19/big/img_269\n2002/08/13/big/img_676\n2002/07/28/big/img_57\n2002/08/05/big/img_3318\n2002/07/31/big/img_218\n2002/08/21/big/img_898\n2002/07/29/big/img_109\n2002/07/19/big/img_854\n2002/08/23/big/img_311\n2002/08/14/big/img_318\n2002/07/25/big/img_523\n2002/07/21/big/img_678\n2003/01/17/big/img_690\n2002/08/28/big/img_19503\n2002/08/18/big/img_251\n2002/08/22/big/img_672\n2002/08/20/big/img_663\n2002/08/02/big/img_148\n2002/09/02/big/img_15580\n2002/07/25/big/img_778\n2002/08/14/big/img_565\n2002/08/12/big/img_374\n2002/08/13/big/img_1018\n2002/08/20/big/img_474\n2002/08/25/big/img_33\n2002/08/02/big/img_1190\n2002/08/08/big/img_864\n2002/08/14/big/img_1071\n2002/08/30/big/img_18103\n2002/08/18/big/img_533\n2003/01/16/big/img_650\n2002/07/25/big/img_108\n2002/07/26/big/img_81\n2002/07/27/big/img_543\n2002/07/29/big/img_521\n2003/01/13/big/img_434\n2002/08/26/big/img_674\n2002/08/06/big/img_2932\n2002/08/07/big/img_1262\n2003/01/15/big/img_201\n2003/01/16/big/img_673\n2002/09/02/big/img_15988\n2002/07/29/big/img_1306\n2003/01/14/big/img_1072\n2002/08/30/big/img_18232\n2002/08/05/big/img_3711\n2002/07/23/big/img_775\n2002/08/01/big/img_16\n2003/01/16/big/img_630\n2002/08/22/big/img_695\n2002/08/14/big/img_51\n2002/08/14/big/img_782\n2002/08/24/big/img_742\n2003/01/14/big/img_512\n2003/01/15/big/img_1183\n2003/01/15/big/img_714\n2002/08/01/big/img_2078\n2002/07/31/big/img_682\n2002/09/02/big/img_15687\n2002/07/26/big/img_518\n2002/08/27/big/img_19676\n2002/09/02/big/img_15969\n2002/08/02/big/img_931\n2002/08/25/big/img_508\n2002/08/29/big/img_18616\n2002/07/22/big/img_839\n2002/07/28/big/img_313\n2003/01/14/big/img_155\n2002/08/02/big/img_1105\n2002/08/09/big/img_53\n2002/08/16/big/img_469\n2002/08/15/big/img_502\n2002/08/20/big/img_575\n2002/07/25/big/img_138\n2003/01/16/big/img_579\n2002/07/19/big/img_352\n2003/01/14/big/img_762\n2003/01/01/big/img_588\n2002/08/02/big/img_981\n2002/08/21/big/img_447\n2002/09/01/big/img_16151\n2003/01/14/big/img_769\n2002/08/23/big/img_461\n2002/08/17/big/img_240\n2002/09/02/big/img_15220\n2002/07/19/big/img_408\n2002/09/02/big/img_15496\n2002/07/29/big/img_758\n2002/08/28/big/img_19392\n2002/08/06/big/img_2723\n2002/08/31/big/img_17752\n2002/08/23/big/img_469\n2002/08/13/big/img_515\n2002/09/02/big/img_15551\n2002/08/03/big/img_462\n2002/07/24/big/img_613\n2002/07/22/big/img_61\n2002/08/08/big/img_171\n2002/08/21/big/img_177\n2003/01/14/big/img_105\n2002/08/02/big/img_1017\n2002/08/22/big/img_106\n2002/07/27/big/img_542\n2002/07/21/big/img_665\n2002/07/23/big/img_595\n2002/08/04/big/img_657\n2002/08/29/big/img_19002\n2003/01/15/big/img_550\n2002/08/14/big/img_662\n2002/07/20/big/img_425\n2002/08/30/big/img_18528\n2002/07/26/big/img_611\n2002/07/22/big/img_849\n2002/08/07/big/img_1655\n2002/08/21/big/img_638\n2003/01/17/big/img_732\n2003/01/01/big/img_496\n2002/
08/18/big/img_713\n2002/08/08/big/img_109\n2002/07/27/big/img_1008\n2002/07/20/big/img_559\n2002/08/16/big/img_699\n2002/08/31/big/img_17702\n2002/07/31/big/img_1013\n2002/08/01/big/img_2027\n2002/08/02/big/img_1001\n2002/08/03/big/img_210\n2002/08/01/big/img_2087\n2003/01/14/big/img_199\n2002/07/29/big/img_48\n2002/07/19/big/img_727\n2002/08/09/big/img_249\n2002/08/04/big/img_632\n2002/08/22/big/img_620\n2003/01/01/big/img_457\n2002/08/05/big/img_3223\n2002/07/27/big/img_240\n2002/07/25/big/img_797\n2002/08/13/big/img_430\n2002/07/25/big/img_615\n2002/08/12/big/img_28\n2002/07/30/big/img_220\n2002/07/24/big/img_89\n2002/08/21/big/img_357\n2002/08/09/big/img_590\n2003/01/13/big/img_525\n2002/08/17/big/img_818\n2003/01/02/big/img_7\n2002/07/26/big/img_636\n2003/01/13/big/img_1122\n2002/07/23/big/img_810\n2002/08/20/big/img_888\n2002/07/27/big/img_3\n2002/08/15/big/img_451\n2002/09/02/big/img_15787\n2002/07/31/big/img_281\n2002/08/05/big/img_3274\n2002/08/07/big/img_1254\n2002/07/31/big/img_27\n2002/08/01/big/img_1366\n2002/07/30/big/img_182\n2002/08/27/big/img_19690\n2002/07/29/big/img_68\n2002/08/23/big/img_754\n2002/07/30/big/img_540\n2002/08/27/big/img_20063\n2002/08/14/big/img_471\n2002/08/02/big/img_615\n2002/07/30/big/img_186\n2002/08/25/big/img_150\n2002/07/27/big/img_626\n2002/07/20/big/img_225\n2003/01/15/big/img_1252\n2002/07/19/big/img_367\n2003/01/15/big/img_582\n2002/08/09/big/img_572\n2002/08/08/big/img_428\n2003/01/15/big/img_639\n2002/08/28/big/img_19245\n2002/07/24/big/img_321\n2002/08/02/big/img_662\n2002/08/08/big/img_1033\n2003/01/17/big/img_867\n2002/07/22/big/img_652\n2003/01/14/big/img_224\n2002/08/18/big/img_49\n2002/07/26/big/img_46\n2002/08/31/big/img_18021\n2002/07/25/big/img_151\n2002/08/23/big/img_540\n2002/08/25/big/img_693\n2002/07/23/big/img_340\n2002/07/28/big/img_117\n2002/09/02/big/img_15768\n2002/08/26/big/img_562\n2002/07/24/big/img_480\n2003/01/15/big/img_341\n2002/08/10/big/img_783\n2002/08/20/big/img_132\n2003/01/14/big/img_370\n2002/07/20/big/img_720\n2002/08/03/big/img_144\n2002/08/20/big/img_538\n2002/08/01/big/img_1745\n2002/08/11/big/img_683\n2002/08/03/big/img_328\n2002/08/10/big/img_793\n2002/08/14/big/img_689\n2002/08/02/big/img_162\n2003/01/17/big/img_411\n2002/07/31/big/img_361\n2002/08/15/big/img_289\n2002/08/08/big/img_254\n2002/08/15/big/img_996\n2002/08/20/big/img_785\n2002/07/24/big/img_511\n2002/08/06/big/img_2614\n2002/08/29/big/img_18733\n2002/08/17/big/img_78\n2002/07/30/big/img_378\n2002/08/31/big/img_17947\n2002/08/26/big/img_88\n2002/07/30/big/img_558\n2002/08/02/big/img_67\n2003/01/14/big/img_325\n2002/07/29/big/img_1357\n2002/07/19/big/img_391\n2002/07/30/big/img_307\n2003/01/13/big/img_219\n2002/07/24/big/img_807\n2002/08/23/big/img_543\n2002/08/29/big/img_18620\n2002/07/22/big/img_769\n2002/08/26/big/img_503\n2002/07/30/big/img_78\n2002/08/14/big/img_1036\n2002/08/09/big/img_58\n2002/07/24/big/img_616\n2002/08/02/big/img_464\n2002/07/26/big/img_576\n2002/07/22/big/img_273\n2003/01/16/big/img_470\n2002/07/29/big/img_329\n2002/07/30/big/img_1086\n2002/07/31/big/img_353\n2002/09/02/big/img_15275\n2003/01/17/big/img_555\n2002/08/26/big/img_212\n2002/08/01/big/img_1692\n2003/01/15/big/img_600\n2002/07/29/big/img_825\n2002/08/08/big/img_68\n2002/08/10/big/img_719\n2002/07/31/big/img_636\n2002/07/29/big/img_325\n2002/07/21/big/img_515\n2002/07/22/big/img_705\n2003/01/13/big/img_818\n2002/08/09/big/img_486\n2002/08/22/big/img_141\n2002/07/22/big/img_303\n2002/08/09/big/img_393\n2002/07/29/big/img_963\n2002/08/02/big/img_1215\n2002/08/
19/big/img_674\n2002/08/12/big/img_690\n2002/08/21/big/img_637\n2002/08/21/big/img_841\n2002/08/24/big/img_71\n2002/07/25/big/img_596\n2002/07/24/big/img_864\n2002/08/18/big/img_293\n2003/01/14/big/img_657\n2002/08/15/big/img_411\n2002/08/16/big/img_348\n2002/08/05/big/img_3157\n2002/07/20/big/img_663\n2003/01/13/big/img_654\n2003/01/16/big/img_433\n2002/08/30/big/img_18200\n2002/08/12/big/img_226\n2003/01/16/big/img_491\n2002/08/08/big/img_666\n2002/07/19/big/img_576\n2003/01/15/big/img_776\n2003/01/16/big/img_899\n2002/07/19/big/img_397\n2002/08/14/big/img_44\n2003/01/15/big/img_762\n2002/08/02/big/img_982\n2002/09/02/big/img_15234\n2002/08/17/big/img_556\n2002/08/21/big/img_410\n2002/08/21/big/img_386\n2002/07/19/big/img_690\n2002/08/05/big/img_3052\n2002/08/14/big/img_219\n2002/08/16/big/img_273\n2003/01/15/big/img_752\n2002/08/08/big/img_184\n2002/07/31/big/img_743\n2002/08/23/big/img_338\n2003/01/14/big/img_1055\n2002/08/05/big/img_3405\n2003/01/15/big/img_17\n2002/08/03/big/img_141\n2002/08/14/big/img_549\n2002/07/27/big/img_1034\n2002/07/31/big/img_932\n2002/08/30/big/img_18487\n2002/09/02/big/img_15814\n2002/08/01/big/img_2086\n2002/09/01/big/img_16535\n2002/07/22/big/img_500\n2003/01/13/big/img_400\n2002/08/25/big/img_607\n2002/08/30/big/img_18384\n2003/01/14/big/img_951\n2002/08/13/big/img_1150\n2002/08/08/big/img_1022\n2002/08/10/big/img_428\n2002/08/28/big/img_19242\n2002/08/05/big/img_3098\n2002/07/23/big/img_400\n2002/08/26/big/img_365\n2002/07/20/big/img_318\n2002/08/13/big/img_740\n2003/01/16/big/img_37\n2002/08/26/big/img_274\n2002/08/02/big/img_205\n2002/08/21/big/img_695\n2002/08/06/big/img_2289\n2002/08/20/big/img_794\n2002/08/18/big/img_438\n2002/08/07/big/img_1380\n2002/08/02/big/img_737\n2002/08/07/big/img_1651\n2002/08/15/big/img_1238\n2002/08/01/big/img_1681\n2002/08/06/big/img_3017\n2002/07/23/big/img_706\n2002/07/31/big/img_392\n2002/08/09/big/img_539\n2002/07/29/big/img_835\n2002/08/26/big/img_723\n2002/08/28/big/img_19235\n2003/01/16/big/img_353\n2002/08/10/big/img_150\n2002/08/29/big/img_19025\n2002/08/21/big/img_310\n2002/08/10/big/img_823\n2002/07/26/big/img_981\n2002/08/11/big/img_288\n2002/08/19/big/img_534\n2002/08/21/big/img_300\n2002/07/31/big/img_49\n2002/07/30/big/img_469\n2002/08/28/big/img_19197\n2002/08/25/big/img_205\n2002/08/10/big/img_390\n2002/08/23/big/img_291\n2002/08/26/big/img_230\n2002/08/18/big/img_76\n2002/07/23/big/img_409\n2002/08/14/big/img_1053\n2003/01/14/big/img_291\n2002/08/10/big/img_503\n2002/08/27/big/img_19928\n2002/08/03/big/img_563\n2002/08/17/big/img_250\n2002/08/06/big/img_2381\n2002/08/17/big/img_948\n2002/08/06/big/img_2710\n2002/07/22/big/img_696\n2002/07/31/big/img_670\n2002/08/12/big/img_594\n2002/07/29/big/img_624\n2003/01/17/big/img_934\n2002/08/03/big/img_584\n2002/08/22/big/img_1003\n2002/08/05/big/img_3396\n2003/01/13/big/img_570\n2002/08/02/big/img_219\n2002/09/02/big/img_15774\n2002/08/16/big/img_818\n2002/08/23/big/img_402\n2003/01/14/big/img_552\n2002/07/29/big/img_71\n2002/08/05/big/img_3592\n2002/08/16/big/img_80\n2002/07/27/big/img_672\n2003/01/13/big/img_470\n2003/01/16/big/img_702\n2002/09/01/big/img_16130\n2002/08/08/big/img_240\n2002/09/01/big/img_16338\n2002/07/26/big/img_312\n2003/01/14/big/img_538\n2002/07/20/big/img_695\n2002/08/30/big/img_18098\n2002/08/25/big/img_259\n2002/08/16/big/img_1042\n2002/08/09/big/img_837\n2002/08/31/big/img_17760\n2002/07/31/big/img_14\n2002/08/09/big/img_361\n2003/01/16/big/img_107\n2002/08/14/big/img_124\n2002/07/19/big/img_463\n2003/01/15/big/img_275\n2002/07/25/big
/img_1151\n2002/07/29/big/img_1501\n2002/08/27/big/img_19889\n2002/08/29/big/img_18603\n2003/01/17/big/img_601\n2002/08/25/big/img_355\n2002/08/08/big/img_297\n2002/08/20/big/img_290\n2002/07/31/big/img_195\n2003/01/01/big/img_336\n2002/08/18/big/img_369\n2002/07/25/big/img_621\n2002/08/11/big/img_508\n2003/01/14/big/img_458\n2003/01/15/big/img_795\n2002/08/12/big/img_498\n2002/08/01/big/img_1734\n2002/08/02/big/img_246\n2002/08/16/big/img_565\n2002/08/11/big/img_475\n2002/08/22/big/img_408\n2002/07/28/big/img_78\n2002/07/21/big/img_81\n2003/01/14/big/img_697\n2002/08/14/big/img_661\n2002/08/15/big/img_507\n2002/08/19/big/img_55\n2002/07/22/big/img_152\n2003/01/14/big/img_470\n2002/08/03/big/img_379\n2002/08/22/big/img_506\n2003/01/16/big/img_966\n2002/08/18/big/img_698\n2002/08/24/big/img_528\n2002/08/23/big/img_10\n2002/08/01/big/img_1655\n2002/08/22/big/img_953\n2002/07/19/big/img_630\n2002/07/22/big/img_889\n2002/08/16/big/img_351\n2003/01/16/big/img_83\n2002/07/19/big/img_805\n2002/08/14/big/img_704\n2002/07/19/big/img_389\n2002/08/31/big/img_17765\n2002/07/29/big/img_606\n2003/01/17/big/img_939\n2002/09/02/big/img_15081\n2002/08/21/big/img_181\n2002/07/29/big/img_1321\n2002/07/21/big/img_497\n2002/07/20/big/img_539\n2002/08/24/big/img_119\n2002/08/01/big/img_1281\n2002/07/26/big/img_207\n2002/07/26/big/img_432\n2002/07/27/big/img_1006\n2002/08/05/big/img_3087\n2002/08/14/big/img_252\n2002/08/14/big/img_798\n2002/07/24/big/img_538\n2002/09/02/big/img_15507\n2002/08/08/big/img_901\n2003/01/14/big/img_557\n2002/08/07/big/img_1819\n2002/08/04/big/img_470\n2002/08/01/big/img_1504\n2002/08/16/big/img_1070\n2002/08/16/big/img_372\n2002/08/23/big/img_416\n2002/08/30/big/img_18208\n2002/08/01/big/img_2043\n2002/07/22/big/img_385\n2002/08/22/big/img_466\n2002/08/21/big/img_869\n2002/08/28/big/img_19429\n2002/08/02/big/img_770\n2002/07/23/big/img_433\n2003/01/14/big/img_13\n2002/07/27/big/img_953\n2002/09/02/big/img_15728\n2002/08/01/big/img_1361\n2002/08/29/big/img_18897\n2002/08/26/big/img_534\n2002/08/11/big/img_121\n2002/08/26/big/img_20130\n2002/07/31/big/img_363\n2002/08/13/big/img_978\n2002/07/25/big/img_835\n2002/08/02/big/img_906\n2003/01/14/big/img_548\n2002/07/30/big/img_80\n2002/07/26/big/img_982\n2003/01/16/big/img_99\n2002/08/19/big/img_362\n2002/08/24/big/img_376\n2002/08/07/big/img_1264\n2002/07/27/big/img_938\n2003/01/17/big/img_535\n2002/07/26/big/img_457\n2002/08/08/big/img_848\n2003/01/15/big/img_859\n2003/01/15/big/img_622\n2002/07/30/big/img_403\n2002/07/29/big/img_217\n2002/07/26/big/img_891\n2002/07/24/big/img_70\n2002/08/25/big/img_619\n2002/08/05/big/img_3375\n2002/08/01/big/img_2160\n2002/08/06/big/img_2227\n2003/01/14/big/img_117\n2002/08/14/big/img_227\n2002/08/13/big/img_565\n2002/08/19/big/img_625\n2002/08/03/big/img_812\n2002/07/24/big/img_41\n2002/08/16/big/img_235\n2002/07/29/big/img_759\n2002/07/21/big/img_433\n2002/07/29/big/img_190\n2003/01/16/big/img_435\n2003/01/13/big/img_708\n2002/07/30/big/img_57\n2002/08/22/big/img_162\n2003/01/01/big/img_558\n2003/01/15/big/img_604\n2002/08/16/big/img_935\n2002/08/20/big/img_394\n2002/07/28/big/img_465\n2002/09/02/big/img_15534\n2002/08/16/big/img_87\n2002/07/22/big/img_469\n2002/08/12/big/img_245\n2003/01/13/big/img_236\n2002/08/06/big/img_2736\n2002/08/03/big/img_348\n2003/01/14/big/img_218\n2002/07/26/big/img_232\n2003/01/15/big/img_244\n2002/07/25/big/img_1121\n2002/08/01/big/img_1484\n2002/07/26/big/img_541\n2002/08/07/big/img_1244\n2002/07/31/big/img_3\n2002/08/30/big/img_18437\n2002/08/29/big/img_19094\n2002/08/
01/big/img_1355\n2002/08/19/big/img_338\n2002/07/19/big/img_255\n2002/07/21/big/img_76\n2002/08/25/big/img_199\n2002/08/12/big/img_740\n2002/07/30/big/img_852\n2002/08/15/big/img_599\n2002/08/23/big/img_254\n2002/08/19/big/img_125\n2002/07/24/big/img_2\n2002/08/04/big/img_145\n2002/08/05/big/img_3137\n2002/07/28/big/img_463\n2003/01/14/big/img_801\n2002/07/23/big/img_366\n2002/08/26/big/img_600\n2002/08/26/big/img_649\n2002/09/02/big/img_15849\n2002/07/26/big/img_248\n2003/01/13/big/img_200\n2002/08/07/big/img_1794\n2002/08/31/big/img_17270\n2002/08/23/big/img_608\n2003/01/13/big/img_837\n2002/08/23/big/img_581\n2002/08/20/big/img_754\n2002/08/18/big/img_183\n2002/08/20/big/img_328\n2002/07/22/big/img_494\n2002/07/29/big/img_399\n2002/08/28/big/img_19284\n2002/08/08/big/img_566\n2002/07/25/big/img_376\n2002/07/23/big/img_138\n2002/07/25/big/img_435\n2002/08/17/big/img_685\n2002/07/19/big/img_90\n2002/07/20/big/img_716\n2002/08/31/big/img_17458\n2002/08/26/big/img_461\n2002/07/25/big/img_355\n2002/08/06/big/img_2152\n2002/07/27/big/img_932\n2002/07/23/big/img_232\n2002/08/08/big/img_1020\n2002/07/31/big/img_366\n2002/08/06/big/img_2667\n2002/08/21/big/img_465\n2002/08/15/big/img_305\n2002/08/02/big/img_247\n2002/07/28/big/img_46\n2002/08/27/big/img_19922\n2002/08/23/big/img_643\n2003/01/13/big/img_624\n2002/08/23/big/img_625\n2002/08/05/big/img_3787\n2003/01/13/big/img_627\n2002/09/01/big/img_16381\n2002/08/05/big/img_3668\n2002/07/21/big/img_535\n2002/08/27/big/img_19680\n2002/07/22/big/img_413\n2002/07/29/big/img_481\n2003/01/15/big/img_496\n2002/07/23/big/img_701\n2002/08/29/big/img_18670\n2002/07/28/big/img_319\n2003/01/14/big/img_517\n2002/07/26/big/img_256\n2003/01/16/big/img_593\n2002/07/30/big/img_956\n2002/07/30/big/img_667\n2002/07/25/big/img_100\n2002/08/11/big/img_570\n2002/07/26/big/img_745\n2002/08/04/big/img_834\n2002/08/25/big/img_521\n2002/08/01/big/img_2148\n2002/09/02/big/img_15183\n2002/08/22/big/img_514\n2002/08/23/big/img_477\n2002/07/23/big/img_336\n2002/07/26/big/img_481\n2002/08/20/big/img_409\n2002/07/23/big/img_918\n2002/08/09/big/img_474\n2002/08/02/big/img_929\n2002/08/31/big/img_17932\n2002/08/19/big/img_161\n2002/08/09/big/img_667\n2002/07/31/big/img_805\n2002/09/02/big/img_15678\n2002/08/31/big/img_17509\n2002/08/29/big/img_18998\n2002/07/23/big/img_301\n2002/08/07/big/img_1612\n2002/08/06/big/img_2472\n2002/07/23/big/img_466\n2002/08/27/big/img_19634\n2003/01/16/big/img_16\n2002/08/14/big/img_193\n2002/08/21/big/img_340\n2002/08/27/big/img_19799\n2002/08/01/big/img_1345\n2002/08/07/big/img_1448\n2002/08/11/big/img_324\n2003/01/16/big/img_754\n2002/08/13/big/img_418\n2003/01/16/big/img_544\n2002/08/19/big/img_135\n2002/08/10/big/img_455\n2002/08/10/big/img_693\n2002/08/31/big/img_17967\n2002/08/28/big/img_19229\n2002/08/04/big/img_811\n2002/09/01/big/img_16225\n2003/01/16/big/img_428\n2002/09/02/big/img_15295\n2002/07/26/big/img_108\n2002/07/21/big/img_477\n2002/08/07/big/img_1354\n2002/08/23/big/img_246\n2002/08/16/big/img_652\n2002/07/27/big/img_553\n2002/07/31/big/img_346\n2002/08/04/big/img_537\n2002/08/08/big/img_498\n2002/08/29/big/img_18956\n2003/01/13/big/img_922\n2002/08/31/big/img_17425\n2002/07/26/big/img_438\n2002/08/19/big/img_185\n2003/01/16/big/img_33\n2002/08/10/big/img_252\n2002/07/29/big/img_598\n2002/08/27/big/img_19820\n2002/08/06/big/img_2664\n2002/08/20/big/img_705\n2003/01/14/big/img_816\n2002/08/03/big/img_552\n2002/07/25/big/img_561\n2002/07/25/big/img_934\n2002/08/01/big/img_1893\n2003/01/14/big/img_746\n2003/01/16/big/img_519\n2002/0
8/03/big/img_681\n2002/07/24/big/img_808\n2002/08/14/big/img_803\n2002/08/25/big/img_155\n2002/07/30/big/img_1107\n2002/08/29/big/img_18882\n2003/01/15/big/img_598\n2002/08/19/big/img_122\n2002/07/30/big/img_428\n2002/07/24/big/img_684\n2002/08/22/big/img_192\n2002/08/22/big/img_543\n2002/08/07/big/img_1318\n2002/08/18/big/img_25\n2002/07/26/big/img_583\n2002/07/20/big/img_464\n2002/08/19/big/img_664\n2002/08/24/big/img_861\n2002/09/01/big/img_16136\n2002/08/22/big/img_400\n2002/08/12/big/img_445\n2003/01/14/big/img_174\n2002/08/27/big/img_19677\n2002/08/31/big/img_17214\n2002/08/30/big/img_18175\n2003/01/17/big/img_402\n2002/08/06/big/img_2396\n2002/08/18/big/img_448\n2002/08/21/big/img_165\n2002/08/31/big/img_17609\n2003/01/01/big/img_151\n2002/08/26/big/img_372\n2002/09/02/big/img_15994\n2002/07/26/big/img_660\n2002/09/02/big/img_15197\n2002/07/29/big/img_258\n2002/08/30/big/img_18525\n2003/01/13/big/img_368\n2002/07/29/big/img_1538\n2002/07/21/big/img_787\n2002/08/18/big/img_152\n2002/08/06/big/img_2379\n2003/01/17/big/img_864\n2002/08/27/big/img_19998\n2002/08/01/big/img_1634\n2002/07/25/big/img_414\n2002/08/22/big/img_627\n2002/08/07/big/img_1669\n2002/08/16/big/img_1052\n2002/08/31/big/img_17796\n2002/08/18/big/img_199\n2002/09/02/big/img_15147\n2002/08/09/big/img_460\n2002/08/14/big/img_581\n2002/08/30/big/img_18286\n2002/07/26/big/img_337\n2002/08/18/big/img_589\n2003/01/14/big/img_866\n2002/07/20/big/img_624\n2002/08/01/big/img_1801\n2002/07/24/big/img_683\n2002/08/09/big/img_725\n2003/01/14/big/img_34\n2002/07/30/big/img_144\n2002/07/30/big/img_706\n2002/08/08/big/img_394\n2002/08/19/big/img_619\n2002/08/06/big/img_2703\n2002/08/29/big/img_19034\n2002/07/24/big/img_67\n2002/08/27/big/img_19841\n2002/08/19/big/img_427\n2003/01/14/big/img_333\n2002/09/01/big/img_16406\n2002/07/19/big/img_882\n2002/08/17/big/img_238\n2003/01/14/big/img_739\n2002/07/22/big/img_151\n2002/08/21/big/img_743\n2002/07/25/big/img_1048\n2002/07/30/big/img_395\n2003/01/13/big/img_584\n2002/08/13/big/img_742\n2002/08/13/big/img_1168\n2003/01/14/big/img_147\n2002/07/26/big/img_803\n2002/08/05/big/img_3298\n2002/08/07/big/img_1451\n2002/08/16/big/img_424\n2002/07/29/big/img_1069\n2002/09/01/big/img_16735\n2002/07/21/big/img_637\n2003/01/14/big/img_585\n2002/08/02/big/img_358\n2003/01/13/big/img_358\n2002/08/14/big/img_198\n2002/08/17/big/img_935\n2002/08/04/big/img_42\n2002/08/30/big/img_18245\n2002/07/25/big/img_158\n2002/08/22/big/img_744\n2002/08/06/big/img_2291\n2002/08/05/big/img_3044\n2002/07/30/big/img_272\n2002/08/23/big/img_641\n2002/07/24/big/img_797\n2002/07/30/big/img_392\n2003/01/14/big/img_447\n2002/07/31/big/img_898\n2002/08/06/big/img_2812\n2002/08/13/big/img_564\n2002/07/22/big/img_43\n2002/07/26/big/img_634\n2002/07/19/big/img_843\n2002/08/26/big/img_58\n2002/07/21/big/img_375\n2002/08/25/big/img_729\n2002/07/19/big/img_561\n2003/01/15/big/img_884\n2002/07/25/big/img_891\n2002/08/09/big/img_558\n2002/08/26/big/img_587\n2002/08/13/big/img_1146\n2002/09/02/big/img_15153\n2002/07/26/big/img_316\n2002/08/01/big/img_1940\n2002/08/26/big/img_90\n2003/01/13/big/img_347\n2002/07/25/big/img_520\n2002/08/29/big/img_18718\n2002/08/28/big/img_19219\n2002/08/13/big/img_375\n2002/07/20/big/img_719\n2002/08/31/big/img_17431\n2002/07/28/big/img_192\n2002/08/26/big/img_259\n2002/08/18/big/img_484\n2002/07/29/big/img_580\n2002/07/26/big/img_84\n2002/08/02/big/img_302\n2002/08/31/big/img_17007\n2003/01/15/big/img_543\n2002/09/01/big/img_16488\n2002/08/22/big/img_798\n2002/07/30/big/img_383\n2002/08/04/big/img_66
8\n2002/08/13/big/img_156\n2002/08/07/big/img_1353\n2002/07/25/big/img_281\n2003/01/14/big/img_587\n2003/01/15/big/img_524\n2002/08/19/big/img_726\n2002/08/21/big/img_709\n2002/08/26/big/img_465\n2002/07/31/big/img_658\n2002/08/28/big/img_19148\n2002/07/23/big/img_423\n2002/08/16/big/img_758\n2002/08/22/big/img_523\n2002/08/16/big/img_591\n2002/08/23/big/img_845\n2002/07/26/big/img_678\n2002/08/09/big/img_806\n2002/08/06/big/img_2369\n2002/07/29/big/img_457\n2002/07/19/big/img_278\n2002/08/30/big/img_18107\n2002/07/26/big/img_444\n2002/08/20/big/img_278\n2002/08/26/big/img_92\n2002/08/26/big/img_257\n2002/07/25/big/img_266\n2002/08/05/big/img_3829\n2002/07/26/big/img_757\n2002/07/29/big/img_1536\n2002/08/09/big/img_472\n2003/01/17/big/img_480\n2002/08/28/big/img_19355\n2002/07/26/big/img_97\n2002/08/06/big/img_2503\n2002/07/19/big/img_254\n2002/08/01/big/img_1470\n2002/08/21/big/img_42\n2002/08/20/big/img_217\n2002/08/06/big/img_2459\n2002/07/19/big/img_552\n2002/08/13/big/img_717\n2002/08/12/big/img_586\n2002/08/20/big/img_411\n2003/01/13/big/img_768\n2002/08/07/big/img_1747\n2002/08/15/big/img_385\n2002/08/01/big/img_1648\n2002/08/15/big/img_311\n2002/08/21/big/img_95\n2002/08/09/big/img_108\n2002/08/21/big/img_398\n2002/08/17/big/img_340\n2002/08/14/big/img_474\n2002/08/13/big/img_294\n2002/08/24/big/img_840\n2002/08/09/big/img_808\n2002/08/23/big/img_491\n2002/07/28/big/img_33\n2003/01/13/big/img_664\n2002/08/02/big/img_261\n2002/08/09/big/img_591\n2002/07/26/big/img_309\n2003/01/14/big/img_372\n2002/08/19/big/img_581\n2002/08/19/big/img_168\n2002/08/26/big/img_422\n2002/07/24/big/img_106\n2002/08/01/big/img_1936\n2002/08/05/big/img_3764\n2002/08/21/big/img_266\n2002/08/31/big/img_17968\n2002/08/01/big/img_1941\n2002/08/15/big/img_550\n2002/08/14/big/img_13\n2002/07/30/big/img_171\n2003/01/13/big/img_490\n2002/07/25/big/img_427\n2002/07/19/big/img_770\n2002/08/12/big/img_759\n2003/01/15/big/img_1360\n2002/08/05/big/img_3692\n2003/01/16/big/img_30\n2002/07/25/big/img_1026\n2002/07/22/big/img_288\n2002/08/29/big/img_18801\n2002/07/24/big/img_793\n2002/08/13/big/img_178\n2002/08/06/big/img_2322\n2003/01/14/big/img_560\n2002/08/18/big/img_408\n2003/01/16/big/img_915\n2003/01/16/big/img_679\n2002/08/07/big/img_1552\n2002/08/29/big/img_19050\n2002/08/01/big/img_2172\n2002/07/31/big/img_30\n2002/07/30/big/img_1019\n2002/07/30/big/img_587\n2003/01/13/big/img_773\n2002/07/30/big/img_410\n2002/07/28/big/img_65\n2002/08/05/big/img_3138\n2002/07/23/big/img_541\n2002/08/22/big/img_963\n2002/07/27/big/img_657\n2002/07/30/big/img_1051\n2003/01/16/big/img_150\n2002/07/31/big/img_519\n2002/08/01/big/img_1961\n2002/08/05/big/img_3752\n2002/07/23/big/img_631\n2003/01/14/big/img_237\n2002/07/28/big/img_21\n2002/07/22/big/img_813\n2002/08/05/big/img_3563\n2003/01/17/big/img_620\n2002/07/19/big/img_523\n2002/07/30/big/img_904\n2002/08/29/big/img_18642\n2002/08/11/big/img_492\n2002/08/01/big/img_2130\n2002/07/25/big/img_618\n2002/08/17/big/img_305\n2003/01/16/big/img_520\n2002/07/26/big/img_495\n2002/08/17/big/img_164\n2002/08/03/big/img_440\n2002/07/24/big/img_441\n2002/08/06/big/img_2146\n2002/08/11/big/img_558\n2002/08/02/big/img_545\n2002/08/31/big/img_18090\n2003/01/01/big/img_136\n2002/07/25/big/img_1099\n2003/01/13/big/img_728\n2003/01/16/big/img_197\n2002/07/26/big/img_651\n2002/08/11/big/img_676\n2003/01/15/big/img_10\n2002/08/21/big/img_250\n2002/08/14/big/img_325\n2002/08/04/big/img_390\n2002/07/24/big/img_554\n2003/01/16/big/img_333\n2002/07/31/big/img_922\n2002/09/02/big/img_15586\n2003/01/16/big
/img_184\n2002/07/22/big/img_766\n2002/07/21/big/img_608\n2002/08/07/big/img_1578\n2002/08/17/big/img_961\n2002/07/27/big/img_324\n2002/08/05/big/img_3765\n2002/08/23/big/img_462\n2003/01/16/big/img_382\n2002/08/27/big/img_19838\n2002/08/01/big/img_1505\n2002/08/21/big/img_662\n2002/08/14/big/img_605\n2002/08/19/big/img_816\n2002/07/29/big/img_136\n2002/08/20/big/img_719\n2002/08/06/big/img_2826\n2002/08/10/big/img_630\n2003/01/17/big/img_973\n2002/08/14/big/img_116\n2002/08/02/big/img_666\n2002/08/21/big/img_710\n2002/08/05/big/img_55\n2002/07/31/big/img_229\n2002/08/01/big/img_1549\n2002/07/23/big/img_432\n2002/07/21/big/img_430\n2002/08/21/big/img_549\n2002/08/08/big/img_985\n2002/07/20/big/img_610\n2002/07/23/big/img_978\n2002/08/23/big/img_219\n2002/07/25/big/img_175\n2003/01/15/big/img_230\n2002/08/23/big/img_385\n2002/07/31/big/img_879\n2002/08/12/big/img_495\n2002/08/22/big/img_499\n2002/08/30/big/img_18322\n2002/08/15/big/img_795\n2002/08/13/big/img_835\n2003/01/17/big/img_930\n2002/07/30/big/img_873\n2002/08/11/big/img_257\n2002/07/31/big/img_593\n2002/08/21/big/img_916\n2003/01/13/big/img_814\n2002/07/25/big/img_722\n2002/08/16/big/img_379\n2002/07/31/big/img_497\n2002/07/22/big/img_602\n2002/08/21/big/img_642\n2002/08/21/big/img_614\n2002/08/23/big/img_482\n2002/07/29/big/img_603\n2002/08/13/big/img_705\n2002/07/23/big/img_833\n2003/01/14/big/img_511\n2002/07/24/big/img_376\n2002/08/17/big/img_1030\n2002/08/05/big/img_3576\n2002/08/16/big/img_540\n2002/07/22/big/img_630\n2002/08/10/big/img_180\n2002/08/14/big/img_905\n2002/08/29/big/img_18777\n2002/08/22/big/img_693\n2003/01/16/big/img_933\n2002/08/20/big/img_555\n2002/08/15/big/img_549\n2003/01/14/big/img_830\n2003/01/16/big/img_64\n2002/08/27/big/img_19670\n2002/08/22/big/img_729\n2002/07/27/big/img_981\n2002/08/09/big/img_458\n2003/01/17/big/img_884\n2002/07/25/big/img_639\n2002/08/31/big/img_18008\n2002/08/22/big/img_249\n2002/08/17/big/img_971\n2002/08/04/big/img_308\n2002/07/28/big/img_362\n2002/08/12/big/img_142\n2002/08/26/big/img_61\n2002/08/14/big/img_422\n2002/07/19/big/img_607\n2003/01/15/big/img_717\n2002/08/01/big/img_1475\n2002/08/29/big/img_19061\n2003/01/01/big/img_346\n2002/07/20/big/img_315\n2003/01/15/big/img_756\n2002/08/15/big/img_879\n2002/08/08/big/img_615\n2003/01/13/big/img_431\n2002/08/05/big/img_3233\n2002/08/24/big/img_526\n2003/01/13/big/img_717\n2002/09/01/big/img_16408\n2002/07/22/big/img_217\n2002/07/31/big/img_960\n2002/08/21/big/img_610\n2002/08/05/big/img_3753\n2002/08/03/big/img_151\n2002/08/21/big/img_267\n2002/08/01/big/img_2175\n2002/08/04/big/img_556\n2002/08/21/big/img_527\n2002/09/02/big/img_15800\n2002/07/27/big/img_156\n2002/07/20/big/img_590\n2002/08/15/big/img_700\n2002/08/08/big/img_444\n2002/07/25/big/img_94\n2002/07/24/big/img_778\n2002/08/14/big/img_694\n2002/07/20/big/img_666\n2002/08/02/big/img_200\n2002/08/02/big/img_578\n2003/01/17/big/img_332\n2002/09/01/big/img_16352\n2002/08/27/big/img_19668\n2002/07/23/big/img_823\n2002/08/13/big/img_431\n2003/01/16/big/img_463\n2002/08/27/big/img_19711\n2002/08/23/big/img_154\n2002/07/31/big/img_360\n2002/08/23/big/img_555\n2002/08/10/big/img_561\n2003/01/14/big/img_550\n2002/08/07/big/img_1370\n2002/07/30/big/img_1184\n2002/08/01/big/img_1445\n2002/08/23/big/img_22\n2002/07/30/big/img_606\n2003/01/17/big/img_271\n2002/08/31/big/img_17316\n2002/08/16/big/img_973\n2002/07/26/big/img_77\n2002/07/20/big/img_788\n2002/08/06/big/img_2426\n2002/08/07/big/img_1498\n2002/08/16/big/img_358\n2002/08/06/big/img_2851\n2002/08/12/big/img_359\n2002/0
8/01/big/img_1521\n2002/08/02/big/img_709\n2002/08/20/big/img_935\n2002/08/12/big/img_188\n2002/08/24/big/img_411\n2002/08/22/big/img_680\n2002/08/06/big/img_2480\n2002/07/20/big/img_627\n2002/07/30/big/img_214\n2002/07/25/big/img_354\n2002/08/02/big/img_636\n2003/01/15/big/img_661\n2002/08/07/big/img_1327\n2002/08/01/big/img_2108\n2002/08/31/big/img_17919\n2002/08/29/big/img_18768\n2002/08/05/big/img_3840\n2002/07/26/big/img_242\n2003/01/14/big/img_451\n2002/08/20/big/img_923\n2002/08/27/big/img_19908\n2002/08/16/big/img_282\n2002/08/19/big/img_440\n2003/01/01/big/img_230\n2002/08/08/big/img_212\n2002/07/20/big/img_443\n2002/08/25/big/img_635\n2003/01/13/big/img_1169\n2002/07/26/big/img_998\n2002/08/15/big/img_995\n2002/08/06/big/img_3002\n2002/07/29/big/img_460\n2003/01/14/big/img_925\n2002/07/23/big/img_539\n2002/08/16/big/img_694\n2003/01/13/big/img_459\n2002/07/23/big/img_249\n2002/08/20/big/img_539\n2002/08/04/big/img_186\n2002/08/26/big/img_264\n2002/07/22/big/img_704\n2002/08/25/big/img_277\n2002/08/22/big/img_988\n2002/07/29/big/img_504\n2002/08/05/big/img_3600\n2002/08/30/big/img_18380\n2003/01/14/big/img_937\n2002/08/21/big/img_254\n2002/08/10/big/img_130\n2002/08/20/big/img_339\n2003/01/14/big/img_428\n2002/08/20/big/img_889\n2002/08/31/big/img_17637\n2002/07/26/big/img_644\n2002/09/01/big/img_16776\n2002/08/06/big/img_2239\n2002/08/06/big/img_2646\n2003/01/13/big/img_491\n2002/08/10/big/img_579\n2002/08/21/big/img_713\n2002/08/22/big/img_482\n2002/07/22/big/img_167\n2002/07/24/big/img_539\n2002/08/14/big/img_721\n2002/07/25/big/img_389\n2002/09/01/big/img_16591\n2002/08/13/big/img_543\n2003/01/14/big/img_432\n2002/08/09/big/img_287\n2002/07/26/big/img_126\n2002/08/23/big/img_412\n2002/08/15/big/img_1034\n2002/08/28/big/img_19485\n2002/07/31/big/img_236\n2002/07/30/big/img_523\n2002/07/19/big/img_141\n2003/01/17/big/img_957\n2002/08/04/big/img_81\n2002/07/25/big/img_206\n2002/08/15/big/img_716\n2002/08/13/big/img_403\n2002/08/15/big/img_685\n2002/07/26/big/img_884\n2002/07/19/big/img_499\n2002/07/23/big/img_772\n2002/07/27/big/img_752\n2003/01/14/big/img_493\n2002/08/25/big/img_664\n2002/07/31/big/img_334\n2002/08/26/big/img_678\n2002/09/01/big/img_16541\n2003/01/14/big/img_347\n2002/07/23/big/img_187\n2002/07/30/big/img_1163\n2002/08/05/big/img_35\n2002/08/22/big/img_944\n2002/08/07/big/img_1239\n2002/07/29/big/img_1215\n2002/08/03/big/img_312\n2002/08/05/big/img_3523\n2002/07/29/big/img_218\n2002/08/13/big/img_672\n2002/08/16/big/img_205\n2002/08/17/big/img_594\n2002/07/29/big/img_1411\n2002/07/30/big/img_942\n2003/01/16/big/img_312\n2002/08/08/big/img_312\n2002/07/25/big/img_15\n2002/08/09/big/img_839\n2002/08/01/big/img_2069\n2002/08/31/big/img_17512\n2002/08/01/big/img_3\n2002/07/31/big/img_320\n2003/01/15/big/img_1265\n2002/08/14/big/img_563\n2002/07/31/big/img_167\n2002/08/20/big/img_374\n2002/08/13/big/img_406\n2002/08/08/big/img_625\n2002/08/02/big/img_314\n2002/08/27/big/img_19964\n2002/09/01/big/img_16670\n2002/07/31/big/img_599\n2002/08/29/big/img_18906\n2002/07/24/big/img_373\n2002/07/26/big/img_513\n2002/09/02/big/img_15497\n2002/08/19/big/img_117\n2003/01/01/big/img_158\n2002/08/24/big/img_178\n2003/01/13/big/img_935\n2002/08/13/big/img_609\n2002/08/30/big/img_18341\n2002/08/25/big/img_674\n2003/01/13/big/img_209\n2002/08/13/big/img_258\n2002/08/05/big/img_3543\n2002/08/07/big/img_1970\n2002/08/06/big/img_3004\n2003/01/17/big/img_487\n2002/08/24/big/img_873\n2002/08/29/big/img_18730\n2002/08/09/big/img_375\n2003/01/16/big/img_751\n2002/08/02/big/img_603\n2002/08/1
9/big/img_325\n2002/09/01/big/img_16420\n2002/08/05/big/img_3633\n2002/08/21/big/img_516\n2002/07/19/big/img_501\n2002/07/26/big/img_688\n2002/07/24/big/img_256\n2002/07/25/big/img_438\n2002/07/31/big/img_1017\n2002/08/22/big/img_512\n2002/07/21/big/img_543\n2002/08/08/big/img_223\n2002/08/19/big/img_189\n2002/08/12/big/img_630\n2002/07/30/big/img_958\n2002/07/28/big/img_208\n2002/08/31/big/img_17691\n2002/07/22/big/img_542\n2002/07/19/big/img_741\n2002/07/19/big/img_158\n2002/08/15/big/img_399\n2002/08/01/big/img_2159\n2002/08/14/big/img_455\n2002/08/17/big/img_1011\n2002/08/26/big/img_744\n2002/08/12/big/img_624\n2003/01/17/big/img_821\n2002/08/16/big/img_980\n2002/07/28/big/img_281\n2002/07/25/big/img_171\n2002/08/03/big/img_116\n2002/07/22/big/img_467\n2002/07/31/big/img_750\n2002/07/26/big/img_435\n2002/07/19/big/img_822\n2002/08/13/big/img_626\n2002/08/11/big/img_344\n2002/08/02/big/img_473\n2002/09/01/big/img_16817\n2002/08/01/big/img_1275\n2002/08/28/big/img_19270\n2002/07/23/big/img_607\n2002/08/09/big/img_316\n2002/07/29/big/img_626\n2002/07/24/big/img_824\n2002/07/22/big/img_342\n2002/08/08/big/img_794\n2002/08/07/big/img_1209\n2002/07/19/big/img_18\n2002/08/25/big/img_634\n2002/07/24/big/img_730\n2003/01/17/big/img_356\n2002/07/23/big/img_305\n2002/07/30/big/img_453\n2003/01/13/big/img_972\n2002/08/06/big/img_2610\n2002/08/29/big/img_18920\n2002/07/31/big/img_123\n2002/07/26/big/img_979\n2002/08/24/big/img_635\n2002/08/05/big/img_3704\n2002/08/07/big/img_1358\n2002/07/22/big/img_306\n2002/08/13/big/img_619\n2002/08/02/big/img_366\n"
  },
  {
    "path": "src/dot/gpen/retinaface/data/__init__.py",
    "content": "#!/usr/bin/env python3\n\nfrom dot.gpen.retinaface.data.config import cfg_mnet, cfg_re50\nfrom dot.gpen.retinaface.data.data_augment import (\n    _crop,\n    _distort,\n    _expand,\n    _mirror,\n    _pad_to_square,\n    _resize_subtract_mean,\n    preproc,\n)\nfrom dot.gpen.retinaface.data.wider_face import WiderFaceDetection, detection_collate\n"
  },
  {
    "path": "src/dot/gpen/retinaface/data/config.py",
    "content": "#!/usr/bin/env python3\n\ncfg_mnet = {\n    \"name\": \"mobilenet0.25\",\n    \"min_sizes\": [[16, 32], [64, 128], [256, 512]],\n    \"steps\": [8, 16, 32],\n    \"variance\": [0.1, 0.2],\n    \"clip\": False,\n    \"loc_weight\": 2.0,\n    \"gpu_train\": True,\n    \"batch_size\": 32,\n    \"ngpu\": 1,\n    \"epoch\": 250,\n    \"decay1\": 190,\n    \"decay2\": 220,\n    \"image_size\": 640,\n    \"pretrain\": False,\n    \"return_layers\": {\"stage1\": 1, \"stage2\": 2, \"stage3\": 3},\n    \"in_channel\": 32,\n    \"out_channel\": 64,\n}\n\ncfg_re50 = {\n    \"name\": \"Resnet50\",\n    \"min_sizes\": [[16, 32], [64, 128], [256, 512]],\n    \"steps\": [8, 16, 32],\n    \"variance\": [0.1, 0.2],\n    \"clip\": False,\n    \"loc_weight\": 2.0,\n    \"gpu_train\": True,\n    \"batch_size\": 24,\n    \"ngpu\": 4,\n    \"epoch\": 100,\n    \"decay1\": 70,\n    \"decay2\": 90,\n    \"image_size\": 840,\n    \"pretrain\": False,\n    \"return_layers\": {\"layer2\": 1, \"layer3\": 2, \"layer4\": 3},\n    \"in_channel\": 256,\n    \"out_channel\": 256,\n}\n"
  },
  {
    "path": "src/dot/gpen/retinaface/data/data_augment.py",
    "content": "#!/usr/bin/env python3\n\nimport random\n\nimport cv2\nimport numpy as np\n\nfrom ..utils.box_utils import matrix_iof\n\n\ndef _crop(image, boxes, labels, landm, img_dim):\n    height, width, _ = image.shape\n    pad_image_flag = True\n\n    for _ in range(250):\n        \"\"\"\n        if random.uniform(0, 1) <= 0.2:\n            scale = 1.0\n        else:\n            scale = random.uniform(0.3, 1.0)\n        \"\"\"\n        PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0]\n        scale = random.choice(PRE_SCALES)\n        short_side = min(width, height)\n        w = int(scale * short_side)\n        h = w\n\n        length = random.randrange(width - w)\n        if width == w:\n            length = 0\n\n        if height == h:\n            t = 0\n        else:\n            t = random.randrange(height - h)\n        roi = np.array((length, t, length + w, t + h))\n\n        value = matrix_iof(boxes, roi[np.newaxis])\n        flag = value >= 1\n        if not flag.any():\n            continue\n\n        centers = (boxes[:, :2] + boxes[:, 2:]) / 2\n        mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)\n        boxes_t = boxes[mask_a].copy()\n        labels_t = labels[mask_a].copy()\n        landms_t = landm[mask_a].copy()\n        landms_t = landms_t.reshape([-1, 5, 2])\n\n        if boxes_t.shape[0] == 0:\n            continue\n\n        image_t = image[roi[1] : roi[3], roi[0] : roi[2]]\n\n        boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])\n        boxes_t[:, :2] -= roi[:2]\n        boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])\n        boxes_t[:, 2:] -= roi[:2]\n\n        # landm\n        landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2]\n        landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0]))\n        landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2])\n        landms_t = landms_t.reshape([-1, 10])\n\n        # make sure that the cropped image contains at least one face > 16 pixel at training image scale\n        b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim\n        b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim\n        mask_b = np.minimum(b_w_t, b_h_t) > 0.0\n        boxes_t = boxes_t[mask_b]\n        labels_t = labels_t[mask_b]\n        landms_t = landms_t[mask_b]\n\n        if boxes_t.shape[0] == 0:\n            continue\n\n        pad_image_flag = False\n\n        return image_t, boxes_t, labels_t, landms_t, pad_image_flag\n    return image, boxes, labels, landm, pad_image_flag\n\n\ndef _distort(image):\n    def _convert(image, alpha=1, beta=0):\n        tmp = image.astype(float) * alpha + beta\n        tmp[tmp < 0] = 0\n        tmp[tmp > 255] = 255\n        image[:] = tmp\n\n    image = image.copy()\n\n    if random.randrange(2):\n\n        # brightness distortion\n        if random.randrange(2):\n            _convert(image, beta=random.uniform(-32, 32))\n\n        # contrast distortion\n        if random.randrange(2):\n            _convert(image, alpha=random.uniform(0.5, 1.5))\n\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n        # saturation distortion\n        if random.randrange(2):\n            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))\n\n        # hue distortion\n        if random.randrange(2):\n            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)\n            tmp %= 180\n            image[:, :, 0] = tmp\n\n        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n\n    else:\n\n        # brightness distortion\n      
  if random.randrange(2):\n            _convert(image, beta=random.uniform(-32, 32))\n\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n\n        # saturation distortion\n        if random.randrange(2):\n            _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))\n\n        # hue distortion\n        if random.randrange(2):\n            tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)\n            tmp %= 180\n            image[:, :, 0] = tmp\n\n        image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)\n\n        # contrast distortion\n        if random.randrange(2):\n            _convert(image, alpha=random.uniform(0.5, 1.5))\n\n    return image\n\n\ndef _expand(image, boxes, fill, p):\n    if random.randrange(2):\n        return image, boxes\n\n    height, width, depth = image.shape\n\n    scale = random.uniform(1, p)\n    w = int(scale * width)\n    h = int(scale * height)\n\n    left = random.randint(0, w - width)\n    top = random.randint(0, h - height)\n\n    boxes_t = boxes.copy()\n    boxes_t[:, :2] += (left, top)\n    boxes_t[:, 2:] += (left, top)\n    expand_image = np.empty((h, w, depth), dtype=image.dtype)\n    expand_image[:, :] = fill\n    expand_image[top : top + height, left : left + width] = image\n    image = expand_image\n\n    return image, boxes_t\n\n\ndef _mirror(image, boxes, landms):\n    _, width, _ = image.shape\n    if random.randrange(2):\n        image = image[:, ::-1]\n        boxes = boxes.copy()\n        boxes[:, 0::2] = width - boxes[:, 2::-2]\n\n        # landm\n        landms = landms.copy()\n        landms = landms.reshape([-1, 5, 2])\n        landms[:, :, 0] = width - landms[:, :, 0]\n        tmp = landms[:, 1, :].copy()\n        landms[:, 1, :] = landms[:, 0, :]\n        landms[:, 0, :] = tmp\n        tmp1 = landms[:, 4, :].copy()\n        landms[:, 4, :] = landms[:, 3, :]\n        landms[:, 3, :] = tmp1\n        landms = landms.reshape([-1, 10])\n\n    return image, boxes, landms\n\n\ndef _pad_to_square(image, rgb_mean, pad_image_flag):\n    if not pad_image_flag:\n        return image\n    height, width, _ = image.shape\n    long_side = max(width, height)\n    image_t = np.empty((long_side, long_side, 3), dtype=image.dtype)\n    image_t[:, :] = rgb_mean\n    image_t[0 : 0 + height, 0 : 0 + width] = image\n    return image_t\n\n\ndef _resize_subtract_mean(image, insize, rgb_mean):\n    interp_methods = [\n        cv2.INTER_LINEAR,\n        cv2.INTER_CUBIC,\n        cv2.INTER_AREA,\n        cv2.INTER_NEAREST,\n        cv2.INTER_LANCZOS4,\n    ]\n    interp_method = interp_methods[random.randrange(5)]\n    image = cv2.resize(image, (insize, insize), interpolation=interp_method)\n    image = image.astype(np.float32)\n    image -= rgb_mean\n    return image.transpose(2, 0, 1)\n\n\nclass preproc(object):\n    def __init__(self, img_dim, rgb_means):\n        self.img_dim = img_dim\n        self.rgb_means = rgb_means\n\n    def __call__(self, image, targets):\n        assert targets.shape[0] > 0, \"this image does not have gt\"\n\n        boxes = targets[:, :4].copy()\n        labels = targets[:, -1].copy()\n        landm = targets[:, 4:-1].copy()\n\n        image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(\n            image, boxes, labels, landm, self.img_dim\n        )\n        image_t = _distort(image_t)\n        image_t = _pad_to_square(image_t, self.rgb_means, pad_image_flag)\n        image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t)\n        height, width, _ = image_t.shape\n        image_t = 
_resize_subtract_mean(image_t, self.img_dim, self.rgb_means)\n        boxes_t[:, 0::2] /= width\n        boxes_t[:, 1::2] /= height\n\n        landm_t[:, 0::2] /= width\n        landm_t[:, 1::2] /= height\n\n        labels_t = np.expand_dims(labels_t, 1)\n        targets_t = np.hstack((boxes_t, landm_t, labels_t))\n\n        return image_t, targets_t\n"
  },
  {
    "path": "src/dot/gpen/retinaface/data/wider_face.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\n\nclass WiderFaceDetection(data.Dataset):\n    def __init__(self, txt_path, preproc=None):\n        self.preproc = preproc\n        self.imgs_path = []\n        self.words = []\n        f = open(txt_path, \"r\")\n        lines = f.readlines()\n        isFirst = True\n        labels = []\n        for line in lines:\n            line = line.rstrip()\n            if line.startswith(\"#\"):\n                if isFirst is True:\n                    isFirst = False\n                else:\n                    labels_copy = labels.copy()\n                    self.words.append(labels_copy)\n                    labels.clear()\n                path = line[2:]\n                path = txt_path.replace(\"label.txt\", \"images/\") + path\n                self.imgs_path.append(path)\n            else:\n                line = line.split(\" \")\n                label = [float(x) for x in line]\n                labels.append(label)\n\n        self.words.append(labels)\n\n    def __len__(self):\n        return len(self.imgs_path)\n\n    def __getitem__(self, index):\n        img = cv2.imread(self.imgs_path[index])\n        height, width, _ = img.shape\n\n        labels = self.words[index]\n        annotations = np.zeros((0, 15))\n        if len(labels) == 0:\n            return annotations\n        for idx, label in enumerate(labels):\n            annotation = np.zeros((1, 15))\n            # bbox\n            annotation[0, 0] = label[0]  # x1\n            annotation[0, 1] = label[1]  # y1\n            annotation[0, 2] = label[0] + label[2]  # x2\n            annotation[0, 3] = label[1] + label[3]  # y2\n\n            # landmarks\n            annotation[0, 4] = label[4]  # l0_x\n            annotation[0, 5] = label[5]  # l0_y\n            annotation[0, 6] = label[7]  # l1_x\n            annotation[0, 7] = label[8]  # l1_y\n            annotation[0, 8] = label[10]  # l2_x\n            annotation[0, 9] = label[11]  # l2_y\n            annotation[0, 10] = label[13]  # l3_x\n            annotation[0, 11] = label[14]  # l3_y\n            annotation[0, 12] = label[16]  # l4_x\n            annotation[0, 13] = label[17]  # l4_y\n            if annotation[0, 4] < 0:\n                annotation[0, 14] = -1\n            else:\n                annotation[0, 14] = 1\n\n            annotations = np.append(annotations, annotation, axis=0)\n        target = np.array(annotations)\n        if self.preproc is not None:\n            img, target = self.preproc(img, target)\n\n        return torch.from_numpy(img), target\n\n\ndef detection_collate(batch):\n    \"\"\"Custom collate fn for dealing with batches of images that have a different\n    number of associated object annotations (bounding boxes).\n\n    Arguments:\n        batch: (tuple) A tuple of tensor images and lists of annotations\n\n    Return:\n        A tuple containing:\n            1) (tensor) batch of images stacked on their 0 dim\n            2) (list of tensors) annotations for a given image are stacked on 0 dim\n    \"\"\"\n    targets = []\n    imgs = []\n    for _, sample in enumerate(batch):\n        for _, tup in enumerate(sample):\n            if torch.is_tensor(tup):\n                imgs.append(tup)\n            elif isinstance(tup, type(np.empty(0))):\n                annos = torch.from_numpy(tup).float()\n                targets.append(annos)\n\n    return (torch.stack(imgs, 0), targets)\n"
  },
  {
    "path": "src/dot/gpen/retinaface/facemodels/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/retinaface/facemodels/net.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef conv_bn(inp, oup, stride=1, leaky=0):\n    return nn.Sequential(\n        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n        nn.BatchNorm2d(oup),\n        nn.LeakyReLU(negative_slope=leaky, inplace=True),\n    )\n\n\ndef conv_bn_no_relu(inp, oup, stride):\n    return nn.Sequential(\n        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n        nn.BatchNorm2d(oup),\n    )\n\n\ndef conv_bn1X1(inp, oup, stride, leaky=0):\n    return nn.Sequential(\n        nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False),\n        nn.BatchNorm2d(oup),\n        nn.LeakyReLU(negative_slope=leaky, inplace=True),\n    )\n\n\ndef conv_dw(inp, oup, stride, leaky=0.1):\n    return nn.Sequential(\n        nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n        nn.BatchNorm2d(inp),\n        nn.LeakyReLU(negative_slope=leaky, inplace=True),\n        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),\n        nn.BatchNorm2d(oup),\n        nn.LeakyReLU(negative_slope=leaky, inplace=True),\n    )\n\n\nclass SSH(nn.Module):\n    def __init__(self, in_channel, out_channel):\n        super(SSH, self).__init__()\n        assert out_channel % 4 == 0\n        leaky = 0\n        if out_channel <= 64:\n            leaky = 0.1\n        self.conv3X3 = conv_bn_no_relu(in_channel, out_channel // 2, stride=1)\n\n        self.conv5X5_1 = conv_bn(in_channel, out_channel // 4, stride=1, leaky=leaky)\n        self.conv5X5_2 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)\n\n        self.conv7X7_2 = conv_bn(\n            out_channel // 4, out_channel // 4, stride=1, leaky=leaky\n        )\n        self.conv7x7_3 = conv_bn_no_relu(out_channel // 4, out_channel // 4, stride=1)\n\n    def forward(self, input):\n        conv3X3 = self.conv3X3(input)\n\n        conv5X5_1 = self.conv5X5_1(input)\n        conv5X5 = self.conv5X5_2(conv5X5_1)\n\n        conv7X7_2 = self.conv7X7_2(conv5X5_1)\n        conv7X7 = self.conv7x7_3(conv7X7_2)\n\n        out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1)\n        out = F.relu(out)\n        return out\n\n\nclass FPN(nn.Module):\n    def __init__(self, in_channels_list, out_channels):\n        super(FPN, self).__init__()\n        leaky = 0\n        if out_channels <= 64:\n            leaky = 0.1\n        self.output1 = conv_bn1X1(\n            in_channels_list[0], out_channels, stride=1, leaky=leaky\n        )\n        self.output2 = conv_bn1X1(\n            in_channels_list[1], out_channels, stride=1, leaky=leaky\n        )\n        self.output3 = conv_bn1X1(\n            in_channels_list[2], out_channels, stride=1, leaky=leaky\n        )\n\n        self.merge1 = conv_bn(out_channels, out_channels, leaky=leaky)\n        self.merge2 = conv_bn(out_channels, out_channels, leaky=leaky)\n\n    def forward(self, input):\n        input = list(input.values())\n\n        output1 = self.output1(input[0])\n        output2 = self.output2(input[1])\n        output3 = self.output3(input[2])\n\n        up3 = F.interpolate(\n            output3, size=[output2.size(2), output2.size(3)], mode=\"nearest\"\n        )\n        output2 = output2 + up3\n        output2 = self.merge2(output2)\n\n        up2 = F.interpolate(\n            output2, size=[output1.size(2), output1.size(3)], mode=\"nearest\"\n        )\n        output1 = output1 + up2\n        output1 = self.merge1(output1)\n\n        out = [output1, output2, output3]\n        return out\n\n\nclass 
MobileNetV1(nn.Module):\n    def __init__(self):\n        super(MobileNetV1, self).__init__()\n        self.stage1 = nn.Sequential(\n            conv_bn(3, 8, 2, leaky=0.1),  # 3\n            conv_dw(8, 16, 1),  # 7\n            conv_dw(16, 32, 2),  # 11\n            conv_dw(32, 32, 1),  # 19\n            conv_dw(32, 64, 2),  # 27\n            conv_dw(64, 64, 1),  # 43\n        )\n        self.stage2 = nn.Sequential(\n            conv_dw(64, 128, 2),  # 43 + 16 = 59\n            conv_dw(128, 128, 1),  # 59 + 32 = 91\n            conv_dw(128, 128, 1),  # 91 + 32 = 123\n            conv_dw(128, 128, 1),  # 123 + 32 = 155\n            conv_dw(128, 128, 1),  # 155 + 32 = 187\n            conv_dw(128, 128, 1),  # 187 + 32 = 219\n        )\n        self.stage3 = nn.Sequential(\n            conv_dw(128, 256, 2),  # 219 +3 2 = 241\n            conv_dw(256, 256, 1),  # 241 + 64 = 301\n        )\n        self.avg = nn.AdaptiveAvgPool2d((1, 1))\n        self.fc = nn.Linear(256, 1000)\n\n    def forward(self, x):\n        x = self.stage1(x)\n        x = self.stage2(x)\n        x = self.stage3(x)\n        x = self.avg(x)\n        x = x.view(-1, 256)\n        x = self.fc(x)\n        return x\n"
  },
  {
    "path": "src/dot/gpen/retinaface/facemodels/retinaface.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models._utils as _utils\n\nfrom .net import FPN as FPN\nfrom .net import SSH as SSH\nfrom .net import MobileNetV1 as MobileNetV1\n\n\nclass ClassHead(nn.Module):\n    def __init__(self, inchannels=512, num_anchors=3):\n        super(ClassHead, self).__init__()\n        self.num_anchors = num_anchors\n        self.conv1x1 = nn.Conv2d(\n            inchannels, self.num_anchors * 2, kernel_size=(1, 1), stride=1, padding=0\n        )\n\n    def forward(self, x):\n        out = self.conv1x1(x)\n        out = out.permute(0, 2, 3, 1).contiguous()\n\n        return out.view(out.shape[0], -1, 2)\n\n\nclass BboxHead(nn.Module):\n    def __init__(self, inchannels=512, num_anchors=3):\n        super(BboxHead, self).__init__()\n        self.conv1x1 = nn.Conv2d(\n            inchannels, num_anchors * 4, kernel_size=(1, 1), stride=1, padding=0\n        )\n\n    def forward(self, x):\n        out = self.conv1x1(x)\n        out = out.permute(0, 2, 3, 1).contiguous()\n\n        return out.view(out.shape[0], -1, 4)\n\n\nclass LandmarkHead(nn.Module):\n    def __init__(self, inchannels=512, num_anchors=3):\n        super(LandmarkHead, self).__init__()\n        self.conv1x1 = nn.Conv2d(\n            inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0\n        )\n\n    def forward(self, x):\n        out = self.conv1x1(x)\n        out = out.permute(0, 2, 3, 1).contiguous()\n\n        return out.view(out.shape[0], -1, 10)\n\n\nclass RetinaFace(nn.Module):\n    def __init__(self, cfg=None, phase=\"train\"):\n        \"\"\"\n        :param cfg:  Network related settings.\n        :param phase: train or test.\n        \"\"\"\n        super(RetinaFace, self).__init__()\n        self.phase = phase\n        backbone = None\n        if cfg[\"name\"] == \"mobilenet0.25\":\n            backbone = MobileNetV1()\n            if cfg[\"pretrain\"]:\n                checkpoint = torch.load(\n                    \"./weights/mobilenetV1X0.25_pretrain.tar\",\n                    map_location=torch.device(\"cpu\"),\n                )\n                from collections import OrderedDict\n\n                new_state_dict = OrderedDict()\n                for k, v in checkpoint[\"state_dict\"].items():\n                    name = k[7:]  # remove module.\n                    new_state_dict[name] = v\n                # load params\n                backbone.load_state_dict(new_state_dict)\n        elif cfg[\"name\"] == \"Resnet50\":\n            import torchvision.models as models\n\n            backbone = models.resnet50(pretrained=cfg[\"pretrain\"])\n\n        self.body = _utils.IntermediateLayerGetter(backbone, cfg[\"return_layers\"])\n        in_channels_stage2 = cfg[\"in_channel\"]\n        in_channels_list = [\n            in_channels_stage2 * 2,\n            in_channels_stage2 * 4,\n            in_channels_stage2 * 8,\n        ]\n        out_channels = cfg[\"out_channel\"]\n        self.fpn = FPN(in_channels_list, out_channels)\n        self.ssh1 = SSH(out_channels, out_channels)\n        self.ssh2 = SSH(out_channels, out_channels)\n        self.ssh3 = SSH(out_channels, out_channels)\n\n        self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg[\"out_channel\"])\n        self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg[\"out_channel\"])\n        self.LandmarkHead = self._make_landmark_head(\n            fpn_num=3, inchannels=cfg[\"out_channel\"]\n        
)\n\n    def _make_class_head(self, fpn_num=3, inchannels=64, anchor_num=2):\n        classhead = nn.ModuleList()\n        for i in range(fpn_num):\n            classhead.append(ClassHead(inchannels, anchor_num))\n        return classhead\n\n    def _make_bbox_head(self, fpn_num=3, inchannels=64, anchor_num=2):\n        bboxhead = nn.ModuleList()\n        for i in range(fpn_num):\n            bboxhead.append(BboxHead(inchannels, anchor_num))\n        return bboxhead\n\n    def _make_landmark_head(self, fpn_num=3, inchannels=64, anchor_num=2):\n        landmarkhead = nn.ModuleList()\n        for i in range(fpn_num):\n            landmarkhead.append(LandmarkHead(inchannels, anchor_num))\n        return landmarkhead\n\n    def forward(self, inputs):\n        out = self.body(inputs)\n\n        # FPN\n        fpn = self.fpn(out)\n\n        # SSH\n        feature1 = self.ssh1(fpn[0])\n        feature2 = self.ssh2(fpn[1])\n        feature3 = self.ssh3(fpn[2])\n        features = [feature1, feature2, feature3]\n\n        bbox_regressions = torch.cat(\n            [self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1\n        )\n        classifications = torch.cat(\n            [self.ClassHead[i](feature) for i, feature in enumerate(features)], dim=1\n        )\n        ldm_regressions = torch.cat(\n            [self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1\n        )\n\n        if self.phase == \"train\":\n            output = (bbox_regressions, classifications, ldm_regressions)\n        else:\n            output = (\n                bbox_regressions,\n                F.softmax(classifications, dim=-1),\n                ldm_regressions,\n            )\n        return output\n"
  },
  {
    "path": "src/dot/gpen/retinaface/layers/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/retinaface/layers/functions/prior_box.py",
    "content": "#!/usr/bin/env python3\n\nfrom itertools import product as product\nfrom math import ceil\n\nimport torch\n\n\nclass PriorBox(object):\n    def __init__(self, cfg, image_size=None, phase=\"train\"):\n        super(PriorBox, self).__init__()\n        self.min_sizes = cfg[\"min_sizes\"]\n        self.steps = cfg[\"steps\"]\n        self.clip = cfg[\"clip\"]\n        self.image_size = image_size\n        self.feature_maps = [\n            [ceil(self.image_size[0] / step), ceil(self.image_size[1] / step)]\n            for step in self.steps\n        ]\n        self.name = \"s\"\n\n    def forward(self):\n        anchors = []\n        for k, f in enumerate(self.feature_maps):\n            min_sizes = self.min_sizes[k]\n            for i, j in product(range(f[0]), range(f[1])):\n                for min_size in min_sizes:\n                    s_kx = min_size / self.image_size[1]\n                    s_ky = min_size / self.image_size[0]\n                    dense_cx = [\n                        x * self.steps[k] / self.image_size[1] for x in [j + 0.5]\n                    ]\n                    dense_cy = [\n                        y * self.steps[k] / self.image_size[0] for y in [i + 0.5]\n                    ]\n                    for cy, cx in product(dense_cy, dense_cx):\n                        anchors += [cx, cy, s_kx, s_ky]\n\n        # back to torch land\n        output = torch.Tensor(anchors).view(-1, 4)\n        if self.clip:\n            output.clamp_(max=1, min=0)\n        return output\n"
  },
  {
    "path": "src/dot/gpen/retinaface/layers/modules/__init__.py",
    "content": "#!/usr/bin/env python3\n\nfrom .multibox_loss import MultiBoxLoss\n\n__all__ = [\"MultiBoxLoss\"]\n"
  },
  {
    "path": "src/dot/gpen/retinaface/layers/modules/multibox_loss.py",
    "content": "#!/usr/bin/env python3\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ...data import cfg_mnet\nfrom ...utils.box_utils import log_sum_exp, match\n\nGPU = cfg_mnet[\"gpu_train\"]\n\n\nclass MultiBoxLoss(nn.Module):\n    \"\"\"SSD Weighted Loss Function\n    Compute Targets:\n        1) Produce Confidence Target Indices by matching  ground truth boxes\n           with (default) 'priorboxes' that have jaccard index > threshold parameter\n           (default threshold: 0.5).\n        2) Produce localization target by 'encoding' variance into offsets of ground\n           truth boxes and their matched  'priorboxes'.\n        3) Hard negative mining to filter the excessive number of negative examples\n           that comes with using a large number of default bounding boxes.\n           (default negative:positive ratio 3:1)\n    Objective Loss:\n        L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n        Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss\n        weighted by α which is set to 1 by cross val.\n        Args:\n            c: class confidences,\n            l: predicted boxes,\n            g: ground truth boxes\n            N: number of matched default boxes\n        See: https://arxiv.org/pdf/1512.02325.pdf for more details.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes,\n        overlap_thresh,\n        prior_for_matching,\n        bkg_label,\n        neg_mining,\n        neg_pos,\n        neg_overlap,\n        encode_target,\n    ):\n        super(MultiBoxLoss, self).__init__()\n        self.num_classes = num_classes\n        self.threshold = overlap_thresh\n        self.background_label = bkg_label\n        self.encode_target = encode_target\n        self.use_prior_for_matching = prior_for_matching\n        self.do_neg_mining = neg_mining\n        self.negpos_ratio = neg_pos\n        self.neg_overlap = neg_overlap\n        self.variance = [0.1, 0.2]\n\n    def forward(self, predictions, priors, targets):\n        \"\"\"Multibox Loss\n        Args:\n            predictions (tuple): A tuple containing loc preds, conf preds,\n            and prior boxes from SSD net.\n                conf shape: torch.size(batch_size,num_priors,num_classes)\n                loc shape: torch.size(batch_size,num_priors,4)\n                priors shape: torch.size(num_priors,4)\n\n            ground_truth (tensor): Ground truth boxes and labels for a batch,\n                shape: [batch_size,num_objs,5] (last idx is the label).\n        \"\"\"\n\n        loc_data, conf_data, landm_data = predictions\n        priors = priors\n        num = loc_data.size(0)\n        num_priors = priors.size(0)\n\n        # match priors (default boxes) and ground truth boxes\n        loc_t = torch.Tensor(num, num_priors, 4)\n        landm_t = torch.Tensor(num, num_priors, 10)\n        conf_t = torch.LongTensor(num, num_priors)\n        for idx in range(num):\n            truths = targets[idx][:, :4].data\n            labels = targets[idx][:, -1].data\n            landms = targets[idx][:, 4:14].data\n            defaults = priors.data\n            match(\n                self.threshold,\n                truths,\n                defaults,\n                self.variance,\n                labels,\n                landms,\n                loc_t,\n                conf_t,\n                landm_t,\n                idx,\n            )\n        device = \"cpu\"\n        if GPU:\n            device = \"mps\" if 
torch.backends.mps.is_available() else \"cuda\"\n            loc_t = loc_t.to(device)\n            conf_t = conf_t.to(device)\n            landm_t = landm_t.to(device)\n\n        zeros = torch.tensor(0).to(device)\n        # landm Loss (Smooth L1)\n        # Shape: [batch,num_priors,10]\n        pos1 = conf_t > zeros\n        num_pos_landm = pos1.long().sum(1, keepdim=True)\n        N1 = max(num_pos_landm.data.sum().float(), 1)\n        pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data)\n        landm_p = landm_data[pos_idx1].view(-1, 10)\n        landm_t = landm_t[pos_idx1].view(-1, 10)\n        loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction=\"sum\")\n\n        pos = conf_t != zeros\n        conf_t[pos] = 1\n\n        # Localization Loss (Smooth L1)\n        # Shape: [batch,num_priors,4]\n        pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data)\n        loc_p = loc_data[pos_idx].view(-1, 4)\n        loc_t = loc_t[pos_idx].view(-1, 4)\n        loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction=\"sum\")\n\n        # Compute max conf across batch for hard negative mining\n        batch_conf = conf_data.view(-1, self.num_classes)\n        loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1))\n\n        # Hard Negative Mining\n        loss_c[pos.view(-1, 1)] = 0  # filter out pos boxes for now\n        loss_c = loss_c.view(num, -1)\n        _, loss_idx = loss_c.sort(1, descending=True)\n        _, idx_rank = loss_idx.sort(1)\n        num_pos = pos.long().sum(1, keepdim=True)\n        num_neg = torch.clamp(self.negpos_ratio * num_pos, max=pos.size(1) - 1)\n        neg = idx_rank < num_neg.expand_as(idx_rank)\n\n        # Confidence Loss Including Positive and Negative Examples\n        pos_idx = pos.unsqueeze(2).expand_as(conf_data)\n        neg_idx = neg.unsqueeze(2).expand_as(conf_data)\n        conf_p = conf_data[(pos_idx + neg_idx).gt(0)].view(-1, self.num_classes)\n        targets_weighted = conf_t[(pos + neg).gt(0)]\n        loss_c = F.cross_entropy(conf_p, targets_weighted, reduction=\"sum\")\n\n        # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N\n        N = max(num_pos.data.sum().float(), 1)\n        loss_l /= N\n        loss_c /= N\n        loss_landm /= N1\n\n        return loss_l, loss_c, loss_landm\n"
  },
  {
    "path": "src/dot/gpen/retinaface/retinaface_detection.py",
    "content": "#!/usr/bin/env python3\n\n\"\"\"\n@paper: GAN Prior Embedded Network for Blind Face Restoration in the Wild (CVPR2021)\n@author: yangxy (yangtao9009@gmail.com)\n\"\"\"\nimport os\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\n\nfrom .data import cfg_re50\nfrom .facemodels.retinaface import RetinaFace\nfrom .layers.functions.prior_box import PriorBox\nfrom .utils.box_utils import decode, decode_landm\nfrom .utils.nms.py_cpu_nms import py_cpu_nms\n\n\nclass RetinaFaceDetection(object):\n    def __init__(self, base_dir, network=\"RetinaFace-R50\", use_gpu=True):\n        torch.set_grad_enabled(False)\n        cudnn.benchmark = True\n        self.pretrained_path = os.path.join(base_dir, \"weights\", network + \".pth\")\n        if use_gpu:\n            self.device = \"mps\" if torch.backends.mps.is_available() else \"cuda\"\n        else:\n            self.device = \"cpu\"\n        self.cfg = cfg_re50\n        self.net = RetinaFace(cfg=self.cfg, phase=\"test\")\n        if use_gpu:\n            self.load_model()\n            self.net = self.net.to(self.device)\n        else:\n            self.load_model(load_to_cpu=True)\n            self.net = self.net.cpu()\n\n    def check_keys(self, pretrained_state_dict):\n        ckpt_keys = set(pretrained_state_dict.keys())\n        model_keys = set(self.net.state_dict().keys())\n        used_pretrained_keys = model_keys & ckpt_keys\n        assert len(used_pretrained_keys) > 0, \"load NONE from pretrained checkpoint\"\n        return True\n\n    def remove_prefix(self, state_dict, prefix):\n        \"\"\"Old style model is stored with all names of parameters sharing common prefix 'module.'\"\"\"\n        return {\n            (lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x)(\n                key\n            ): value\n            for key, value in state_dict.items()\n        }\n\n    def load_model(self, load_to_cpu=False):\n        if load_to_cpu:\n            pretrained_dict = torch.load(\n                self.pretrained_path, map_location=lambda storage, loc: storage\n            )\n        else:\n            # pretrained_dict = torch.load(\n            #     self.pretrained_path, map_location=lambda storage, loc: storage.to(\"mps\")#.cuda()\n            # )\n            pretrained_dict = torch.load(self.pretrained_path, map_location=self.device)\n        if \"state_dict\" in pretrained_dict.keys():\n            pretrained_dict = self.remove_prefix(\n                pretrained_dict[\"state_dict\"], \"module.\"\n            )\n        else:\n            pretrained_dict = self.remove_prefix(pretrained_dict, \"module.\")\n        self.check_keys(pretrained_dict)\n        self.net.load_state_dict(pretrained_dict, strict=False)\n        self.net.eval()\n\n    def detect(\n        self,\n        img_raw,\n        resize=1,\n        confidence_threshold=0.9,\n        nms_threshold=0.4,\n        top_k=5000,\n        keep_top_k=750,\n        save_image=False,\n        use_gpu=True,\n    ):\n        img = np.float32(img_raw)\n\n        im_height, im_width = img.shape[:2]\n        scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n        img -= (104, 117, 123)\n        img = img.transpose(2, 0, 1)\n        img = torch.from_numpy(img).unsqueeze(0)\n        if use_gpu:\n            img = img.to(self.device)\n            scale = scale.to(self.device)\n        else:\n            img = img.cpu()\n            scale = scale.cpu()\n\n        loc, conf, landms = self.net(img)  # 
forward pass\n\n        priorbox = PriorBox(self.cfg, image_size=(im_height, im_width))\n        priors = priorbox.forward()\n        if use_gpu:\n            priors = priors.to(self.device)\n        else:\n            priors = priors.cpu()\n\n        prior_data = priors.data\n        boxes = decode(loc.data.squeeze(0), prior_data, self.cfg[\"variance\"])\n        boxes = boxes * scale / resize\n        boxes = boxes.cpu().numpy()\n        scores = conf.squeeze(0).data.cpu().numpy()[:, 1]\n        landms = decode_landm(landms.data.squeeze(0), prior_data, self.cfg[\"variance\"])\n        scale1 = torch.Tensor(\n            [\n                img.shape[3],\n                img.shape[2],\n                img.shape[3],\n                img.shape[2],\n                img.shape[3],\n                img.shape[2],\n                img.shape[3],\n                img.shape[2],\n                img.shape[3],\n                img.shape[2],\n            ]\n        )\n        if use_gpu:\n            scale1 = scale1.to(self.device)\n        else:\n            scale1 = scale1.cpu()\n\n        landms = landms * scale1 / resize\n        landms = landms.cpu().numpy()\n\n        # ignore low scores\n        inds = np.where(scores > confidence_threshold)[0]\n        boxes = boxes[inds]\n        landms = landms[inds]\n        scores = scores[inds]\n\n        # keep top-K before NMS\n        order = scores.argsort()[::-1][:top_k]\n        boxes = boxes[order]\n        landms = landms[order]\n        scores = scores[order]\n\n        # do NMS\n        dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)\n        keep = py_cpu_nms(dets, nms_threshold)\n        dets = dets[keep, :]\n        landms = landms[keep]\n\n        # keep top-K faster NMS\n        dets = dets[:keep_top_k, :]\n        landms = landms[:keep_top_k, :]\n\n        # sort faces(delete)\n        \"\"\"\n        fscores = [det[4] for det in dets]\n        sorted_idx = sorted(range(len(fscores)), key=lambda k:fscores[k], reverse=False) # sort index\n        tmp = [landms[idx] for idx in sorted_idx]\n        landms = np.asarray(tmp)\n        \"\"\"\n\n        landms = landms.reshape((-1, 5, 2))\n        landms = landms.transpose((0, 2, 1))\n        landms = landms.reshape(\n            -1,\n            10,\n        )\n        return dets, landms\n"
  },
  {
    "path": "src/dot/gpen/retinaface/utils/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/retinaface/utils/box_utils.py",
    "content": "#!/usr/bin/env python3\n\nimport numpy as np\nimport torch\n\n\ndef point_form(boxes):\n    \"\"\"Convert prior_boxes to (xmin, ymin, xmax, ymax)\n    representation for comparison to point form ground truth data.\n    Args:\n        boxes: (tensor) center-size default boxes from priorbox layers.\n    Return:\n        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n    \"\"\"\n    return torch.cat(\n        (\n            boxes[:, :2] - boxes[:, 2:] / 2,  # xmin, ymin\n            boxes[:, :2] + boxes[:, 2:] / 2,\n        ),\n        1,\n    )  # xmax, ymax\n\n\ndef center_size(boxes):\n    \"\"\"Convert prior_boxes to (cx, cy, w, h)\n    representation for comparison to center-size form ground truth data.\n    Args:\n        boxes: (tensor) point_form boxes\n    Return:\n        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n    \"\"\"\n    return torch.cat(\n        (boxes[:, 2:] + boxes[:, :2]) / 2, boxes[:, 2:] - boxes[:, :2], 1  # cx, cy\n    )  # w, h\n\n\ndef intersect(box_a, box_b):\n    \"\"\"We resize both tensors to [A,B,2] without new malloc:\n    [A,2] -> [A,1,2] -> [A,B,2]\n    [B,2] -> [1,B,2] -> [A,B,2]\n    Then we compute the area of intersect between box_a and box_b.\n    Args:\n      box_a: (tensor) bounding boxes, Shape: [A,4].\n      box_b: (tensor) bounding boxes, Shape: [B,4].\n    Return:\n      (tensor) intersection area, Shape: [A,B].\n    \"\"\"\n    A = box_a.size(0)\n    B = box_b.size(0)\n    max_xy = torch.min(\n        box_a[:, 2:].unsqueeze(1).expand(A, B, 2),\n        box_b[:, 2:].unsqueeze(0).expand(A, B, 2),\n    )\n    min_xy = torch.max(\n        box_a[:, :2].unsqueeze(1).expand(A, B, 2),\n        box_b[:, :2].unsqueeze(0).expand(A, B, 2),\n    )\n    inter = torch.clamp((max_xy - min_xy), min=0)\n    return inter[:, :, 0] * inter[:, :, 1]\n\n\ndef jaccard(box_a, box_b):\n    \"\"\"Compute the jaccard overlap of two sets of boxes.  The jaccard overlap\n    is simply the intersection over union of two boxes.  
Here we operate on\n    ground truth boxes and default boxes.\n    E.g.:\n        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n    Args:\n        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n    Return:\n        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n    \"\"\"\n    inter = intersect(box_a, box_b)\n    area_a = (\n        ((box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]))\n        .unsqueeze(1)\n        .expand_as(inter)\n    )  # [A,B]\n    area_b = (\n        ((box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]))\n        .unsqueeze(0)\n        .expand_as(inter)\n    )  # [A,B]\n    union = area_a + area_b - inter\n    return inter / union  # [A,B]\n\n\ndef matrix_iou(a, b):\n    \"\"\"\n    return iou of a and b, numpy version for data augmentation\n    \"\"\"\n    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n    area_b = np.prod(b[:, 2:] - b[:, :2], axis=1)\n    return area_i / (area_a[:, np.newaxis] + area_b - area_i)\n\n\ndef matrix_iof(a, b):\n    \"\"\"\n    return iof of a and b, numpy version for data augmentation\n    \"\"\"\n    lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])\n    rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])\n\n    area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n    area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)\n    return area_i / np.maximum(area_a[:, np.newaxis], 1)\n\n\ndef match(\n    threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx\n):\n    \"\"\"Match each prior box with the ground truth box of the highest jaccard\n    overlap, encode the bounding boxes, then return the matched indices\n    corresponding to both confidence and location preds.\n    Args:\n        threshold: (float) The overlap threshold used when matching boxes.\n        truths: (tensor) Ground truth boxes, Shape: [num_obj, 4].\n        priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n        variances: (tensor) Variances corresponding to each prior coord,\n            Shape: [num_priors, 4].\n        labels: (tensor) All the class labels for the image, Shape: [num_obj].\n        landms: (tensor) Ground truth landms, Shape [num_obj, 10].\n        loc_t: (tensor) Tensor to be filled w/ encoded location targets.\n        conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds.\n        landm_t: (tensor) Tensor to be filled w/ encoded landm targets.\n        idx: (int) current batch index\n    Return:\n        The matched indices corresponding to 1)location 2)confidence 3)landm preds.\n    \"\"\"\n    # jaccard index\n    overlaps = jaccard(truths, point_form(priors))\n    # (Bipartite Matching)\n    # [1,num_objects] best prior for each ground truth\n    best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)\n\n    # ignore hard gt\n    valid_gt_idx = best_prior_overlap[:, 0] >= 0.2\n    best_prior_idx_filter = best_prior_idx[valid_gt_idx, :]\n    if best_prior_idx_filter.shape[0] <= 0:\n        loc_t[idx] = 0\n        conf_t[idx] = 0\n        return\n\n    # [1,num_priors] best ground truth for each prior\n    best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)\n    best_truth_idx.squeeze_(0)\n    best_truth_overlap.squeeze_(0)\n    
best_prior_idx.squeeze_(1)\n    best_prior_idx_filter.squeeze_(1)\n    best_prior_overlap.squeeze_(1)\n    best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2)  # ensure best prior\n    # TODO refactor: index  best_prior_idx with long tensor\n    # ensure every gt matches with its prior of max overlap\n    for j in range(best_prior_idx.size(0)):\n        best_truth_idx[best_prior_idx[j]] = j\n    matches = truths[best_truth_idx]  # Shape: [num_priors,4]\n    conf = labels[best_truth_idx]  # Shape: [num_priors]\n    conf[best_truth_overlap < threshold] = 0  # label as background\n    loc = encode(matches, priors, variances)\n\n    matches_landm = landms[best_truth_idx]\n    landm = encode_landm(matches_landm, priors, variances)\n    loc_t[idx] = loc  # [num_priors,4] encoded offsets to learn\n    conf_t[idx] = conf  # [num_priors] top class label for each prior\n    landm_t[idx] = landm\n\n\ndef encode(matched, priors, variances):\n    \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n    we have matched (based on jaccard overlap) with the prior boxes.\n    Args:\n        matched: (tensor) Coords of ground truth for each prior in point-form\n            Shape: [num_priors, 4].\n        priors: (tensor) Prior boxes in center-offset form\n            Shape: [num_priors,4].\n        variances: (list[float]) Variances of priorboxes\n    Return:\n        encoded boxes (tensor), Shape: [num_priors, 4]\n    \"\"\"\n\n    # dist b/t match center and prior's center\n    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]\n    # encode variance\n    g_cxcy /= variances[0] * priors[:, 2:]\n    # match wh / prior wh\n    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n    g_wh = torch.log(g_wh) / variances[1]\n    # return target for smooth_l1_loss\n    return torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]\n\n\ndef encode_landm(matched, priors, variances):\n    \"\"\"Encode the variances from the priorbox layers into the ground truth boxes\n    we have matched (based on jaccard overlap) with the prior boxes.\n    Args:\n        matched: (tensor) Coords of ground truth for each prior in point-form\n            Shape: [num_priors, 10].\n        priors: (tensor) Prior boxes in center-offset form\n            Shape: [num_priors,4].\n        variances: (list[float]) Variances of priorboxes\n    Return:\n        encoded landm (tensor), Shape: [num_priors, 10]\n    \"\"\"\n\n    # dist b/t match center and prior's center\n    matched = torch.reshape(matched, (matched.size(0), 5, 2))\n    priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n    priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n    priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n    priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)\n    priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)\n    g_cxcy = matched[:, :, :2] - priors[:, :, :2]\n    # encode variance\n    g_cxcy /= variances[0] * priors[:, :, 2:]\n    g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)\n    # return target for smooth_l1_loss\n    return g_cxcy\n\n\n# Adapted from https://github.com/Hakuyume/chainer-ssd\ndef decode(loc, priors, variances):\n    \"\"\"Decode locations from predictions using priors to undo\n    the encoding we did for offset regression at train time.\n    Args:\n        loc (tensor): location predictions for loc layers,\n            Shape: [num_priors,4]\n        priors 
(tensor): Prior boxes in center-offset form.\n            Shape: [num_priors,4].\n        variances: (list[float]) Variances of priorboxes\n    Return:\n        decoded bounding box predictions\n    \"\"\"\n\n    boxes = torch.cat(\n        (\n            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n            priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1]),\n        ),\n        1,\n    )\n    boxes[:, :2] -= boxes[:, 2:] / 2\n    boxes[:, 2:] += boxes[:, :2]\n    return boxes\n\n\ndef decode_landm(pre, priors, variances):\n    \"\"\"Decode landm from predictions using priors to undo\n    the encoding we did for offset regression at train time.\n    Args:\n        pre (tensor): landm predictions for loc layers,\n            Shape: [num_priors,10]\n        priors (tensor): Prior boxes in center-offset form.\n            Shape: [num_priors,4].\n        variances: (list[float]) Variances of priorboxes\n    Return:\n        decoded landm predictions\n    \"\"\"\n    landms = torch.cat(\n        (\n            priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],\n            priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],\n            priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],\n            priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],\n            priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],\n        ),\n        dim=1,\n    )\n    return landms\n\n\ndef log_sum_exp(x):\n    \"\"\"Utility function for computing log_sum_exp while determining\n    This will be used to determine unaveraged confidence loss across\n    all examples in a batch.\n    Args:\n        x (Variable(tensor)): conf_preds from conf layers\n    \"\"\"\n    x_max = x.data.max()\n    return torch.log(torch.sum(torch.exp(x - x_max), 1, keepdim=True)) + x_max\n\n\n# Original author: Francisco Massa:\n# https://github.com/fmassa/object-detection.torch\n# Ported to PyTorch by Max deGroot (02/01/2017)\ndef nms(boxes, scores, overlap=0.5, top_k=200):\n    \"\"\"Apply non-maximum suppression at test time to avoid detecting too many\n    overlapping bounding boxes for a given object.\n    Args:\n        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].\n        scores: (tensor) The class predscores for the img, Shape:[num_priors].\n        overlap: (float) The overlap thresh for suppressing unnecessary boxes.\n        top_k: (int) The Maximum number of box preds to consider.\n    Return:\n        The indices of the kept boxes with respect to num_priors.\n    \"\"\"\n\n    keep = torch.Tensor(scores.size(0)).fill_(0).long()\n    if boxes.numel() == 0:\n        return keep\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n    area = torch.mul(x2 - x1, y2 - y1)\n    v, idx = scores.sort(0)  # sort in ascending order\n    idx = idx[-top_k:]  # indices of the top-k largest vals\n    xx1 = boxes.new()\n    yy1 = boxes.new()\n    xx2 = boxes.new()\n    yy2 = boxes.new()\n    w = boxes.new()\n    h = boxes.new()\n\n    count = 0\n    while idx.numel() > 0:\n        i = idx[-1]  # index of current largest val\n        keep[count] = i\n        count += 1\n        if idx.size(0) == 1:\n            break\n        idx = idx[:-1]  # remove kept element from view\n        # load bboxes of next highest vals\n        torch.index_select(x1, 0, idx, out=xx1)\n        torch.index_select(y1, 0, idx, out=yy1)\n        torch.index_select(x2, 0, idx, out=xx2)\n        torch.index_select(y2, 0, idx, out=yy2)\n        # 
store element-wise max with next highest score\n        xx1 = torch.clamp(xx1, min=x1[i])\n        yy1 = torch.clamp(yy1, min=y1[i])\n        xx2 = torch.clamp(xx2, max=x2[i])\n        yy2 = torch.clamp(yy2, max=y2[i])\n        w.resize_as_(xx2)\n        h.resize_as_(yy2)\n        w = xx2 - xx1\n        h = yy2 - yy1\n        # check sizes of xx1 and xx2.. after each iteration\n        w = torch.clamp(w, min=0.0)\n        h = torch.clamp(h, min=0.0)\n        inter = w * h\n\n        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas)\n        union = (rem_areas - inter) + area[i]\n        IoU = inter / union  # store result in iou\n        # keep only elements with an IoU <= overlap\n        idx = idx[IoU.le(overlap)]\n    return keep, count\n"
  },
  {
    "path": "src/dot/gpen/retinaface/utils/nms/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/gpen/retinaface/utils/nms/py_cpu_nms.py",
    "content": "#!/usr/bin/env python3\n\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport numpy as np\n\n\ndef py_cpu_nms(dets, thresh):\n    \"\"\"Pure Python NMS baseline.\"\"\"\n    x1 = dets[:, 0]\n    y1 = dets[:, 1]\n    x2 = dets[:, 2]\n    y2 = dets[:, 3]\n    scores = dets[:, 4]\n\n    areas = (x2 - x1 + 1) * (y2 - y1 + 1)\n    order = scores.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        i = order[0]\n        keep.append(i)\n        xx1 = np.maximum(x1[i], x1[order[1:]])\n        yy1 = np.maximum(y1[i], y1[order[1:]])\n        xx2 = np.minimum(x2[i], x2[order[1:]])\n        yy2 = np.minimum(y2[i], y2[order[1:]])\n\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        inter = w * h\n        ovr = inter / (areas[i] + areas[order[1:]] - inter)\n\n        inds = np.where(ovr <= thresh)[0]\n        order = order[inds + 1]\n\n    return keep\n"
  },
  {
    "path": "src/dot/gpen/retinaface/utils/timer.py",
    "content": "#!/usr/bin/env python3\n\n# --------------------------------------------------------\n# Fast R-CNN\n# Copyright (c) 2015 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ross Girshick\n# --------------------------------------------------------\n\nimport time\n\n\nclass Timer(object):\n    \"\"\"A simple timer.\"\"\"\n\n    def __init__(self):\n        self.total_time = 0.0\n        self.calls = 0\n        self.start_time = 0.0\n        self.diff = 0.0\n        self.average_time = 0.0\n\n    def tic(self):\n        # using time.time instead of time.clock because time time.clock\n        # does not normalize for multithreading\n        self.start_time = time.time()\n\n    def toc(self, average=True):\n        self.diff = time.time() - self.start_time\n        self.total_time += self.diff\n        self.calls += 1\n        self.average_time = self.total_time / self.calls\n        if average:\n            return self.average_time\n        else:\n            return self.diff\n\n    def clear(self):\n        self.total_time = 0.0\n        self.calls = 0\n        self.start_time = 0.0\n        self.diff = 0.0\n        self.average_time = 0.0\n"
  },
  {
    "path": "src/dot/simswap/__init__.py",
    "content": "#!/usr/bin/env python3\n\nfrom .option import SimswapOption\n\n__all__ = [\"SimswapOption\"]\n"
  },
  {
    "path": "src/dot/simswap/configs/config.yaml",
    "content": "---\nanalysis:\n    simswap:\n        parsing_model_path: saved_models/simswap/parsing_model/checkpoint/79999_iter.pth\n        checkpoints_dir: saved_models/simswap/checkpoints\n        arcface_model_path: saved_models/simswap/arcface_model/arcface_checkpoint.tar\n        detection_threshold: 0.6\n        det_size: [640, 640]\n        use_gpu: true\n        show_fps: true\n        opt_verbose: false\n        opt_crop_size: 224\n        opt_gpu_ids: 0\n        opt_fp16: false\n        opt_use_mask: true\n        opt_name: people\n        opt_resize_or_crop: scale_width\n        opt_load_pretrain: ''\n        opt_which_epoch: latest\n        opt_continue_train: store_true\n        gpen: gpen_256\n        gpen_path: saved_models/gpen\n"
  },
  {
    "path": "src/dot/simswap/configs/config_512.yaml",
    "content": "---\nanalysis:\n    simswap:\n        parsing_model_path: saved_models/simswap/parsing_model/checkpoint/79999_iter.pth\n        checkpoints_dir: saved_models/simswap/checkpoints\n        arcface_model_path: saved_models/simswap/arcface_model/arcface_checkpoint.tar\n        detection_threshold: 0.6\n        det_size: [640, 640]\n        use_gpu: true\n        show_fps: true\n        opt_verbose: false\n        opt_crop_size: 512\n        opt_gpu_ids: 0\n        opt_fp16: false\n        opt_use_mask: true\n        opt_name: people\n        opt_resize_or_crop: scale_width\n        opt_load_pretrain: ''\n        opt_which_epoch: '550000'\n        opt_continue_train: store_true\n        gpen: gpen_256\n        gpen_path: saved_models/gpen\n"
  },
  {
    "path": "src/dot/simswap/fs_model.py",
    "content": "#!/usr/bin/env python3\n\nimport os\nimport sys\n\nimport torch\n\nfrom .models.base_model import BaseModel\n\n\ndef determine_path():\n    \"\"\"\n    Find the script path\n    \"\"\"\n    try:\n        root = __file__\n        if os.path.islink(root):\n            root = os.path.realpath(root)\n\n        return os.path.dirname(os.path.abspath(root))\n    except Exception as e:\n        print(e)\n        print(\"I'm sorry, but something is wrong.\")\n        print(\"There is no __file__ variable. Please contact the author.\")\n        sys.exit()\n\n\nsys.path.insert(0, determine_path())\n\n# TODO: Move this class inside models\n\n\nclass fsModel(BaseModel):\n    def name(self):\n        return \"fsModel\"\n\n    def initialize(\n        self,\n        opt_gpu_ids,\n        opt_checkpoints_dir,\n        opt_name,\n        opt_verbose,\n        opt_crop_size,\n        opt_resize_or_crop,\n        opt_load_pretrain,\n        opt_which_epoch,\n        opt_continue_train,\n        arcface_model_path,\n        use_gpu=True,\n    ):\n\n        BaseModel.initialize(\n            self, opt_gpu_ids, opt_checkpoints_dir, opt_name, opt_verbose\n        )\n        torch.backends.cudnn.benchmark = True\n\n        if use_gpu:\n            device = torch.device(\n                \"mps\" if torch.backends.mps.is_available() else \"cuda\"\n            )\n        else:\n            device = torch.device(\"cpu\")\n\n        if opt_crop_size == 224:\n            from .models.fs_networks import Generator_Adain_Upsample\n        elif opt_crop_size == 512:\n            from .models.fs_networks_512 import Generator_Adain_Upsample\n\n        # Generator network\n        self.netG = Generator_Adain_Upsample(\n            input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False\n        )\n        self.netG.to(device)\n\n        # Id network\n        if use_gpu:\n            netArc_checkpoint = torch.load(arcface_model_path)\n        else:\n            netArc_checkpoint = torch.load(\n                arcface_model_path, map_location=torch.device(\"cpu\")\n            )\n\n        self.netArc = netArc_checkpoint\n        self.netArc = self.netArc.to(device)\n        self.netArc.eval()\n\n        pretrained_path = \"\"\n        self.load_network(self.netG, \"G\", opt_which_epoch, pretrained_path)\n        return\n\n    def forward(self, img_id, img_att, latent_id, latent_att, for_G=False):\n        img_fake = self.netG.forward(img_att, latent_id)\n\n        return img_fake\n\n\ndef create_model(\n    opt_verbose,\n    opt_crop_size,\n    opt_fp16,\n    opt_gpu_ids,\n    opt_checkpoints_dir,\n    opt_name,\n    opt_resize_or_crop,\n    opt_load_pretrain,\n    opt_which_epoch,\n    opt_continue_train,\n    arcface_model_path,\n    use_gpu=True,\n):\n\n    model = fsModel()\n\n    model.initialize(\n        opt_gpu_ids,\n        opt_checkpoints_dir,\n        opt_name,\n        opt_verbose,\n        opt_crop_size,\n        opt_resize_or_crop,\n        opt_load_pretrain,\n        opt_which_epoch,\n        opt_continue_train,\n        arcface_model_path,\n        use_gpu=use_gpu,\n    )\n\n    if opt_verbose:\n        print(\"model [%s] was created\" % (model.name()))\n\n    return model\n"
  },
  {
    "path": "src/dot/simswap/mediapipe/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/simswap/mediapipe/face_mesh.py",
    "content": "#!/usr/bin/env python3\r\n\r\nfrom typing import List, Optional, Tuple\r\n\r\nimport cv2\r\nimport mediapipe as mp\r\nimport numpy as np\r\nfrom mediapipe.framework.formats.landmark_pb2 import NormalizedLandmark\r\n\r\nfrom .utils import face_align_ffhqandnewarc as face_align\r\nfrom .utils import mediapipe_landmarks\r\n\r\nmp_face_mesh = mp.solutions.face_mesh\r\n\r\n\r\nclass FaceMesh:\r\n    \"\"\"Wrapper class of Mediapipe's FaceMesh module. Extracts facial landmarks\r\n    and performs face alignment.\r\n\r\n    Args:\r\n        static_image_mode (bool, optional):\r\n            Indicates whether to treat input images as separated images(not video-stream). Defaults to True.\r\n        max_num_faces (int, optional):\r\n            Maximum allowed faces to examine in single image. Defaults to 1.\r\n        refine_landmarks (bool, optional):\r\n            Used to reduce jitter across multiple input images. Ignored if `static_image_mode = True`. Defaults to True.\r\n        min_detection_confidence (float, optional):\r\n            Threshold for a detection to considered successfull. Defaults to 0.5.\r\n        mode (str, optional):\r\n            Either ['None' | 'ffhq']. Instructs `estimate_norm` function for face alignment mode. Defaults to \"None\".\r\n    \"\"\"\r\n\r\n    def __init__(\r\n        self,\r\n        static_image_mode: bool = True,\r\n        max_num_faces: int = 1,\r\n        refine_landmarks: bool = True,\r\n        min_detection_confidence: float = 0.5,\r\n        mode: str = \"None\",\r\n    ):\r\n        self.MediaPipeIds = mediapipe_landmarks.MediaPipeLandmarks\r\n        self.static_image_mode = static_image_mode\r\n        self.max_num_faces = max_num_faces\r\n        self.refine_landmarks = refine_landmarks\r\n        self.min_detection_confidence = min_detection_confidence\r\n        self.mode = mode\r\n\r\n    def _get_centroid(self, landmarks: List[NormalizedLandmark]) -> Tuple[float, float]:\r\n        \"\"\"Given a set of normalized landmarks/points finds centroid point\r\n\r\n        Args:\r\n            landmarks (List[NormalizedLandmark]): List of relative points that form a polygon\r\n\r\n        Returns:\r\n            Tuple[float, float]: x,y coordinates of polygon centroid\r\n        \"\"\"\r\n        x_li = [landmark.x for landmark in landmarks]\r\n        y_li = [landmark.y for landmark in landmarks]\r\n        _len = len(landmarks)\r\n        return sum(x_li) / _len, sum(y_li) / _len\r\n\r\n    def get_face_landmarks(self, image: np.ndarray) -> Optional[np.array]:\r\n        \"\"\"Calls FaceMesh module from Mediapipe and retrieves related landmarks.\r\n        The order of landmarks is important for face alignment\r\n\r\n        landmarks: [\r\n            [\r\n                Left Eye,\r\n                Right Eye,\r\n                Nose Tip,\r\n                Left Mouth Tip,\r\n                Right Mouth Tip\r\n            ]\r\n        ]\r\n        Extracted landmarks are normalized points based on width/height of the image\r\n\r\n        @Eyes, Mediapipe returns a list of landmarks that forms a polygon\r\n        `_get_centroid` method returns middle point\r\n        @Mouth, Mediapipe returns a list of landmarks that forms a polygon.\r\n        Only edge points are needed, `min/max` on x-axis\r\n\r\n\r\n        Args:\r\n            image (np.ndarray): [description]\r\n\r\n        Returns:\r\n            Optional[np.array]: [description]\r\n        \"\"\"\r\n        # keypoints for all detected faces\r\n        
detection_kpss = []\r\n        with mp_face_mesh.FaceMesh(\r\n            static_image_mode=self.static_image_mode,\r\n            max_num_faces=self.max_num_faces,\r\n            refine_landmarks=self.refine_landmarks,\r\n            min_detection_confidence=self.min_detection_confidence,\r\n        ) as face_mesh:\r\n\r\n            # convert BGR image to RGB before processing.\r\n            detection = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\r\n            if not detection.multi_face_landmarks:\r\n                return None\r\n\r\n            # get width/height to de-normalize relative points\r\n            height, width, _ = image.shape\r\n            for face_landmarks in detection.multi_face_landmarks:\r\n                landmark_arr = np.empty((5, 2))\r\n                # left eye: gets landmarks(polygon) and calculates center point\r\n                left_eye = [\r\n                    face_landmarks.landmark[pt]\r\n                    for pt in self.MediaPipeIds.LEFT_EYE_OUTER\r\n                ]\r\n                centroid_left_eye = self._get_centroid(left_eye)\r\n                landmark_arr[0] = np.array(\r\n                    (centroid_left_eye[0] * width, centroid_left_eye[1] * height)\r\n                )\r\n\r\n                # right eye: gets landmarks(polygon) and calculates center point\r\n                right_eye = [\r\n                    face_landmarks.landmark[pt]\r\n                    for pt in self.MediaPipeIds.RIGHT_EYE_OUTER\r\n                ]\r\n                centroid_right_eye = self._get_centroid(right_eye)\r\n                landmark_arr[1] = np.array(\r\n                    (centroid_right_eye[0] * width, centroid_right_eye[1] * height)\r\n                )\r\n\r\n                # nose tip\r\n                nose_landmark = face_landmarks.landmark[self.MediaPipeIds.NOSE_TIP]\r\n                landmark_arr[2] = np.array(\r\n                    (nose_landmark.x * width, nose_landmark.y * height)\r\n                )\r\n\r\n                # mouth region: finds the most left and most right point of outer lips region\r\n                lips_outer_landmarks = [\r\n                    face_landmarks.landmark[pt] for pt in self.MediaPipeIds.LIPS_OUTER\r\n                ]\r\n                mouth_most_left_point = min(lips_outer_landmarks, key=lambda x: x.x)\r\n                mouth_most_right_point = max(lips_outer_landmarks, key=lambda x: x.x)\r\n                landmark_arr[3] = np.array(\r\n                    (\r\n                        mouth_most_left_point.x * width,\r\n                        mouth_most_left_point.y * height,\r\n                    )\r\n                )\r\n                landmark_arr[4] = np.array(\r\n                    (\r\n                        mouth_most_right_point.x * width,\r\n                        mouth_most_right_point.y * height,\r\n                    )\r\n                )\r\n                detection_kpss.append(landmark_arr)\r\n\r\n        return np.array(detection_kpss)\r\n\r\n    def get(\r\n        self, image: np.ndarray, crop_size: Tuple[int, int]\r\n    ) -> Optional[Tuple[List, List]]:\r\n        \"\"\"Driver method of face alignment\r\n\r\n        Args:\r\n            image (np.ndarray): raw cv2 image\r\n            crop_size (Tuple[int, int]): face alignment crop size\r\n\r\n        Returns:\r\n            Optional[Tuple[List, List]]: List of face aligned images for each detected person\r\n        \"\"\"\r\n        # gets facial landmarks using Face_Mesh model from MediaPipe\r\n  
      landmarks = self.get_face_landmarks(image)\r\n        if landmarks is None:\r\n            print(\"ERROR: No face detected!\")\r\n            return None\r\n\r\n        align_img_list = []\r\n        M_list = []\r\n        for i in range(landmarks.shape[0]):\r\n            kps = landmarks[i]\r\n            M, _ = face_align.estimate_norm(kps, crop_size, self.mode)\r\n            align_img = cv2.warpAffine(\r\n                image, M, (crop_size, crop_size), borderValue=0.0\r\n            )\r\n            align_img_list.append(align_img)\r\n            M_list.append(M)\r\n\r\n        return align_img_list, M_list\r\n"
  },
  {
    "path": "src/dot/simswap/mediapipe/utils/face_align_ffhqandnewarc.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nfrom skimage import transform as trans\n\nsrc1 = np.array(\n    [\n        [51.642, 50.115],\n        [57.617, 49.990],\n        [35.740, 69.007],\n        [51.157, 89.050],\n        [57.025, 89.702],\n    ],\n    dtype=np.float32,\n)\n# <--left\nsrc2 = np.array(\n    [\n        [45.031, 50.118],\n        [65.568, 50.872],\n        [39.677, 68.111],\n        [45.177, 86.190],\n        [64.246, 86.758],\n    ],\n    dtype=np.float32,\n)\n\n# ---frontal\nsrc3 = np.array(\n    [\n        [39.730, 51.138],\n        [72.270, 51.138],\n        [56.000, 68.493],\n        [42.463, 87.010],\n        [69.537, 87.010],\n    ],\n    dtype=np.float32,\n)\n\n# -->right\nsrc4 = np.array(\n    [\n        [46.845, 50.872],\n        [67.382, 50.118],\n        [72.737, 68.111],\n        [48.167, 86.758],\n        [67.236, 86.190],\n    ],\n    dtype=np.float32,\n)\n\n# -->right profile\nsrc5 = np.array(\n    [\n        [54.796, 49.990],\n        [60.771, 50.115],\n        [76.673, 69.007],\n        [55.388, 89.702],\n        [61.257, 89.050],\n    ],\n    dtype=np.float32,\n)\n\nsrc = np.array([src1, src2, src3, src4, src5])\nsrc_map = src\n\nffhq_src = np.array(\n    [\n        [192.98138, 239.94708],\n        [318.90277, 240.1936],\n        [256.63416, 314.01935],\n        [201.26117, 371.41043],\n        [313.08905, 371.15118],\n    ]\n)\nffhq_src = np.expand_dims(ffhq_src, axis=0)\n\n\n# lmk is prediction; src is template\ndef estimate_norm(lmk, image_size=112, mode=\"ffhq\"):\n    assert lmk.shape == (5, 2)\n    tform = trans.SimilarityTransform()\n    lmk_tran = np.insert(lmk, 2, values=np.ones(5), axis=1)\n    min_M = []\n    min_index = []\n    min_error = float(\"inf\")\n    if mode == \"ffhq\":\n        src = ffhq_src * image_size / 512\n    else:\n        src = src_map * image_size / 112\n    for i in np.arange(src.shape[0]):\n        tform.estimate(lmk, src[i])\n        M = tform.params[0:2, :]\n        results = np.dot(M, lmk_tran.T)\n        results = results.T\n        error = np.sum(np.sqrt(np.sum((results - src[i]) ** 2, axis=1)))\n\n        if error < min_error:\n            min_error = error\n            min_M = M\n            min_index = i\n    return min_M, min_index\n\n\ndef norm_crop(img, landmark, image_size=112, mode=\"ffhq\"):\n    if mode == \"Both\":\n        M_None, _ = estimate_norm(landmark, image_size, mode=\"newarc\")\n        M_ffhq, _ = estimate_norm(landmark, image_size, mode=\"ffhq\")\n        warped_None = cv2.warpAffine(\n            img, M_None, (image_size, image_size), borderValue=0.0\n        )\n        warped_ffhq = cv2.warpAffine(\n            img, M_ffhq, (image_size, image_size), borderValue=0.0\n        )\n        return warped_ffhq, warped_None\n    else:\n        M, pose_index = estimate_norm(landmark, image_size, mode)\n        warped = cv2.warpAffine(img, M, (image_size, image_size), borderValue=0.0)\n        return warped\n\n\ndef square_crop(im, S):\n    if im.shape[0] > im.shape[1]:\n        height = S\n        width = int(float(im.shape[1]) / im.shape[0] * S)\n        scale = float(S) / im.shape[0]\n    else:\n        width = S\n        height = int(float(im.shape[0]) / im.shape[1] * S)\n        scale = float(S) / im.shape[1]\n    resized_im = cv2.resize(im, (width, height))\n    det_im = np.zeros((S, S, 3), dtype=np.uint8)\n    det_im[: resized_im.shape[0], : resized_im.shape[1], :] = resized_im\n    return det_im, scale\n\n\ndef transform(data, center, output_size, scale, 
rotation):\n    scale_ratio = scale\n    rot = float(rotation) * np.pi / 180.0\n    t1 = trans.SimilarityTransform(scale=scale_ratio)\n    cx = center[0] * scale_ratio\n    cy = center[1] * scale_ratio\n    t2 = trans.SimilarityTransform(translation=(-1 * cx, -1 * cy))\n    t3 = trans.SimilarityTransform(rotation=rot)\n    t4 = trans.SimilarityTransform(translation=(output_size / 2, output_size / 2))\n    t = t1 + t2 + t3 + t4\n    M = t.params[0:2]\n    cropped = cv2.warpAffine(data, M, (output_size, output_size), borderValue=0.0)\n    return cropped, M\n\n\ndef trans_points2d(pts, M):\n    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)\n    for i in range(pts.shape[0]):\n        pt = pts[i]\n        new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)\n        new_pt = np.dot(M, new_pt)\n        new_pts[i] = new_pt[0:2]\n\n    return new_pts\n\n\ndef trans_points3d(pts, M):\n    scale = np.sqrt(M[0][0] * M[0][0] + M[0][1] * M[0][1])\n    new_pts = np.zeros(shape=pts.shape, dtype=np.float32)\n    for i in range(pts.shape[0]):\n        pt = pts[i]\n        new_pt = np.array([pt[0], pt[1], 1.0], dtype=np.float32)\n        new_pt = np.dot(M, new_pt)\n        new_pts[i][0:2] = new_pt[0:2]\n        new_pts[i][2] = pts[i][2] * scale\n\n    return new_pts\n\n\ndef trans_points(pts, M):\n    if pts.shape[1] == 2:\n        return trans_points2d(pts, M)\n    else:\n        return trans_points3d(pts, M)\n"
  },
  {
    "path": "src/dot/simswap/mediapipe/utils/mediapipe_landmarks.py",
    "content": "#!/usr/bin/env python3\n\n\nclass MediaPipeLandmarks:\n    \"\"\"Defines facial landmark indexes for Google's MediaPipe\"\"\"\n\n    LIPS_OUTER = [\n        61,\n        185,\n        40,\n        39,\n        37,\n        0,\n        267,\n        269,\n        270,\n        409,\n        291,\n        375,\n        321,\n        405,\n        314,\n        17,\n        84,\n        181,\n        91,\n        146,\n        61,\n    ]\n    LIPS_INNER = [\n        78,\n        191,\n        80,\n        81,\n        82,\n        13,\n        312,\n        311,\n        310,\n        415,\n        308,\n        324,\n        318,\n        402,\n        317,\n        14,\n        87,\n        178,\n        88,\n        95,\n        78,\n    ]\n    RIGHT_EYE_OUTER = [\n        463,\n        414,\n        286,\n        258,\n        257,\n        259,\n        260,\n        467,\n        359,\n        255,\n        339,\n        254,\n        253,\n        252,\n        256,\n        341,\n    ]\n    LEFT_EYE_OUTER = [\n        130,\n        247,\n        30,\n        29,\n        27,\n        28,\n        56,\n        190,\n        243,\n        112,\n        26,\n        22,\n        23,\n        24,\n        110,\n        25,\n    ]\n    NOSE_TIP = 4\n"
  },
  {
    "path": "src/dot/simswap/models/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/simswap/models/arcface_models.py",
    "content": "import math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import Parameter\n\nfrom dot.simswap.parsing_model.resnet import conv3x3\n\n\nclass SEBlock(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(SEBlock, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Sequential(\n            nn.Linear(channel, channel // reduction),\n            nn.PReLU(),\n            nn.Linear(channel // reduction, channel),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        b, c, _, _ = x.size()\n        y = self.avg_pool(x).view(b, c)\n        y = self.fc(y).view(b, c, 1, 1)\n        return x * y\n\n\nclass IRBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):\n        super(IRBlock, self).__init__()\n        self.bn0 = nn.BatchNorm2d(inplanes)\n        self.conv1 = conv3x3(inplanes, inplanes)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.prelu = nn.PReLU()\n        self.conv2 = conv3x3(inplanes, planes, stride)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n        self.use_se = use_se\n        if self.use_se:\n            self.se = SEBlock(planes)\n\n    def forward(self, x):\n        residual = x\n        out = self.bn0(x)\n        out = self.conv1(out)\n        out = self.bn1(out)\n        out = self.prelu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        if self.use_se:\n            out = self.se(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.prelu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, layers, use_se=True):\n        self.inplanes = 64\n        self.use_se = use_se\n        super(ResNet, self).__init__()\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.prelu = nn.PReLU()\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n        self.bn2 = nn.BatchNorm2d(512)\n        self.dropout = nn.Dropout()\n        self.fc = nn.Linear(512 * 7 * 7, 512)\n        self.bn3 = nn.BatchNorm1d(512)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.xavier_normal_(m.weight)\n            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                nn.init.xavier_normal_(m.weight)\n                nn.init.constant_(m.bias, 0)\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(\n                    self.inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False,\n                ),\n                nn.BatchNorm2d(planes * 
block.expansion),\n            )\n\n        layers = []\n        layers.append(\n            block(self.inplanes, planes, stride, downsample, use_se=self.use_se)\n        )\n        self.inplanes = planes\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes, use_se=self.use_se))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.prelu(x)\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        x = self.bn2(x)\n        x = self.dropout(x)\n        # feature = x\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n        x = self.bn3(x)\n\n        return x\n\n\nclass ArcMarginModel(nn.Module):\n    def __init__(self, args):\n        super(ArcMarginModel, self).__init__()\n\n        num_classes = 93431\n        self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))\n        nn.init.xavier_uniform_(self.weight)\n\n        self.easy_margin = args.easy_margin\n        self.m = args.margin_m\n        self.s = args.margin_s\n\n        self.cos_m = math.cos(self.m)\n        self.sin_m = math.sin(self.m)\n        self.th = math.cos(math.pi - self.m)\n        self.mm = math.sin(math.pi - self.m) * self.m\n\n    def forward(self, input, label):\n        x = F.normalize(input)\n        W = F.normalize(self.weight)\n        cosine = F.linear(x, W)\n        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n        phi = cosine * self.cos_m - sine * self.sin_m  # cos(theta + m)\n        if self.easy_margin:\n            phi = torch.where(cosine > 0, phi, cosine)\n        else:\n            phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n\n        device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        one_hot = torch.zeros(cosine.size(), device=device)\n        one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)\n        output *= self.s\n        return output\n"
  },
  {
    "path": "src/dot/simswap/models/base_model.py",
    "content": "#!/usr/bin/env python3\n\nimport os\nimport sys\n\nimport torch\n\n\nclass BaseModel(torch.nn.Module):\n    def name(self):\n        return \"BaseModel\"\n\n    def initialize(self, opt_gpu_ids, opt_checkpoints_dir, opt_name, opt_verbose):\n        self.gpu_ids = opt_gpu_ids\n        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor\n        self.save_dir = os.path.join(opt_checkpoints_dir, opt_name)\n        self.opt_verbose = opt_verbose\n\n    def set_input(self, input):\n        self.input = input\n\n    def forward(self):\n        pass\n\n    # used in test time, no backprop\n    def test(self):\n        pass\n\n    def get_image_paths(self):\n        pass\n\n    def optimize_parameters(self):\n        pass\n\n    def get_current_visuals(self):\n        return self.input\n\n    def get_current_errors(self):\n        return {}\n\n    def save(self, label):\n        pass\n\n    # helper saving function that can be used by subclasses\n    def save_network(self, network, network_label, epoch_label, gpu_ids):\n        save_filename = \"%s_net_%s.pth\" % (epoch_label, network_label)\n        save_path = os.path.join(self.save_dir, save_filename)\n        torch.save(network.cpu().state_dict(), save_path)\n        if len(gpu_ids) and torch.cuda.is_available():\n            network.cuda()\n\n    # helper loading function that can be used by subclasses\n    def load_network(self, network, network_label, epoch_label, save_dir=\"\"):\n        save_filename = \"%s_net_%s.pth\" % (epoch_label, network_label)\n        if not save_dir:\n            save_dir = self.save_dir\n        save_path = os.path.join(save_dir, save_filename)\n        if not os.path.isfile(save_path):\n            print(\"%s not exists yet!\" % save_path)\n            if network_label == \"G\":\n                raise (\"Generator must exist!\")\n        else:\n            try:\n                network.load_state_dict(torch.load(save_path), strict=False)\n            except Exception as e:\n                print(e)\n                pretrained_dict = torch.load(save_path)\n                model_dict = network.state_dict()\n                try:\n                    pretrained_dict = {\n                        k: v for k, v in pretrained_dict.items() if k in model_dict\n                    }\n                    network.load_state_dict(pretrained_dict)\n                    if self.opt_verbose:\n                        print(\n                            \"Pretrained network %s has excessive layers;\"\n                            \"Only loading layers that are\"\n                            \"used\" % network_label\n                        )\n                except Exception as e:\n                    print(e)\n                    print(\n                        \"Pretrained network %s has fewer layers; The\"\n                        \"following are not initialized:\" % network_label\n                    )\n                    for k, v in pretrained_dict.items():\n                        if v.size() == model_dict[k].size():\n                            model_dict[k] = v\n\n                    if sys.version_info >= (3, 0):\n                        not_initialized = set()\n                    else:\n                        from sets import Set\n\n                        not_initialized = Set()\n\n                    for k, v in model_dict.items():\n                        if (k not in pretrained_dict) or (\n                            v.size() != pretrained_dict[k].size()\n                        
):\n                            not_initialized.add(k.split(\".\")[0])\n\n                    print(sorted(not_initialized))\n                    network.load_state_dict(model_dict)\n\n    def update_learning_rate(self):\n        pass\n"
  },
  {
    "path": "src/dot/simswap/models/fs_networks.py",
    "content": "#!/usr/bin/env python3\r\n\r\n\"\"\"\r\nCopyright (C) 2019 NVIDIA Corporation.  All rights reserved.\r\nLicensed under the CC BY-NC-SA 4.0 license\r\n(https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\r\n\"\"\"\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass InstanceNorm(nn.Module):\r\n    def __init__(self, epsilon=1e-8):\r\n        \"\"\"\r\n        @notice: avoid in-place ops.\r\n        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3\r\n        \"\"\"\r\n        super(InstanceNorm, self).__init__()\r\n        self.epsilon = epsilon\r\n\r\n    def forward(self, x):\r\n        x = x - torch.mean(x, (2, 3), True)\r\n        tmp = torch.mul(x, x)  # or x ** 2\r\n        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)\r\n        return x * tmp\r\n\r\n\r\nclass ApplyStyle(nn.Module):\r\n    \"\"\"\r\n    @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb\r\n    \"\"\"\r\n\r\n    def __init__(self, latent_size, channels):\r\n        super(ApplyStyle, self).__init__()\r\n        self.linear = nn.Linear(latent_size, channels * 2)\r\n\r\n    def forward(self, x, latent):\r\n        style = self.linear(latent)  # style => [batch_size, n_channels*2]\r\n        shape = [-1, 2, x.size(1), 1, 1]\r\n        style = style.view(shape)  # [batch_size, 2, n_channels, ...]\r\n        x = x * (style[:, 0] * 1 + 1.0) + style[:, 1] * 1\r\n        return x\r\n\r\n\r\nclass ResnetBlock_Adain(nn.Module):\r\n    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):\r\n\r\n        super(ResnetBlock_Adain, self).__init__()\r\n\r\n        p = 0\r\n        conv1 = []\r\n        if padding_type == \"reflect\":\r\n            conv1 += [nn.ReflectionPad2d(1)]\r\n        elif padding_type == \"replicate\":\r\n            conv1 += [nn.ReplicationPad2d(1)]\r\n        elif padding_type == \"zero\":\r\n            p = 1\r\n        else:\r\n            raise NotImplementedError(\"padding [%s] is not implemented\" % padding_type)\r\n        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]\r\n        self.conv1 = nn.Sequential(*conv1)\r\n        self.style1 = ApplyStyle(latent_size, dim)\r\n        self.act1 = activation\r\n\r\n        p = 0\r\n        conv2 = []\r\n        if padding_type == \"reflect\":\r\n            conv2 += [nn.ReflectionPad2d(1)]\r\n        elif padding_type == \"replicate\":\r\n            conv2 += [nn.ReplicationPad2d(1)]\r\n        elif padding_type == \"zero\":\r\n            p = 1\r\n        else:\r\n            raise NotImplementedError(\"padding [%s] is not implemented\" % padding_type)\r\n        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]\r\n        self.conv2 = nn.Sequential(*conv2)\r\n        self.style2 = ApplyStyle(latent_size, dim)\r\n\r\n    def forward(self, x, dlatents_in_slice):\r\n        y = self.conv1(x)\r\n        y = self.style1(y, dlatents_in_slice)\r\n        y = self.act1(y)\r\n        y = self.conv2(y)\r\n        y = self.style2(y, dlatents_in_slice)\r\n        out = x + y\r\n        return out\r\n\r\n\r\nclass Generator_Adain_Upsample(nn.Module):\r\n    def __init__(\r\n        self,\r\n        input_nc,\r\n        output_nc,\r\n        latent_size,\r\n        n_blocks=6,\r\n        deep=False,\r\n        norm_layer=nn.BatchNorm2d,\r\n        padding_type=\"reflect\",\r\n    ):\r\n\r\n        
assert n_blocks >= 0\r\n        super(Generator_Adain_Upsample, self).__init__()\r\n        activation = nn.ReLU(True)\r\n        self.deep = deep\r\n\r\n        self.first_layer = nn.Sequential(\r\n            nn.ReflectionPad2d(3),\r\n            nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),\r\n            norm_layer(64),\r\n            activation,\r\n        )\r\n\r\n        # downsample\r\n        self.down1 = nn.Sequential(\r\n            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),\r\n            norm_layer(128),\r\n            activation,\r\n        )\r\n        self.down2 = nn.Sequential(\r\n            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),\r\n            norm_layer(256),\r\n            activation,\r\n        )\r\n        self.down3 = nn.Sequential(\r\n            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),\r\n            norm_layer(512),\r\n            activation,\r\n        )\r\n        if self.deep:\r\n            self.down4 = nn.Sequential(\r\n                nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),\r\n                norm_layer(512),\r\n                activation,\r\n            )\r\n\r\n        # resnet blocks\r\n        BN = []\r\n        for i in range(n_blocks):\r\n            BN += [\r\n                ResnetBlock_Adain(\r\n                    512,\r\n                    latent_size=latent_size,\r\n                    padding_type=padding_type,\r\n                    activation=activation,\r\n                )\r\n            ]\r\n        self.BottleNeck = nn.Sequential(*BN)\r\n\r\n        if self.deep:\r\n            self.up4 = nn.Sequential(\r\n                nn.Upsample(scale_factor=2, mode=\"bilinear\"),\r\n                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\r\n                nn.BatchNorm2d(512),\r\n                activation,\r\n            )\r\n        self.up3 = nn.Sequential(\r\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\r\n            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),\r\n            nn.BatchNorm2d(256),\r\n            activation,\r\n        )\r\n        self.up2 = nn.Sequential(\r\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\r\n            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\r\n            nn.BatchNorm2d(128),\r\n            activation,\r\n        )\r\n        self.up1 = nn.Sequential(\r\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\r\n            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),\r\n            nn.BatchNorm2d(64),\r\n            activation,\r\n        )\r\n        self.last_layer = nn.Sequential(\r\n            nn.ReflectionPad2d(3),\r\n            nn.Conv2d(64, output_nc, kernel_size=7, padding=0),\r\n            nn.Tanh(),\r\n        )\r\n\r\n    def forward(self, input, dlatents):\r\n        x = input  # 3*224*224\r\n\r\n        skip1 = self.first_layer(x)\r\n        skip2 = self.down1(skip1)\r\n        skip3 = self.down2(skip2)\r\n        if self.deep:\r\n            skip4 = self.down3(skip3)\r\n            x = self.down4(skip4)\r\n        else:\r\n            x = self.down3(skip3)\r\n\r\n        for i in range(len(self.BottleNeck)):\r\n            x = self.BottleNeck[i](x, dlatents)\r\n\r\n        if self.deep:\r\n            x = self.up4(x)\r\n        x = self.up3(x)\r\n        x = self.up2(x)\r\n        x = self.up1(x)\r\n        x = self.last_layer(x)\r\n        x = (x + 1) / 2\r\n\r\n        return x\r\n"
  },
  {
    "path": "src/dot/simswap/models/fs_networks_512.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nAuthor: Naiyuan liu\nGithub: https://github.com/NNNNAI\nDate: 2021-11-23 16:55:48\nLastEditors: Naiyuan liu\nLastEditTime: 2021-11-24 16:58:06\nDescription:\n\nCopyright (C) 2019 NVIDIA Corporation.  All rights reserved.\nLicensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\n\n\nclass InstanceNorm(nn.Module):\n    def __init__(self, epsilon=1e-8):\n        \"\"\"\n        @notice: avoid in-place ops.\n        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3\n        \"\"\"\n        super(InstanceNorm, self).__init__()\n        self.epsilon = epsilon\n\n    def forward(self, x):\n        x = x - torch.mean(x, (2, 3), True)\n        tmp = torch.mul(x, x)  # or x ** 2\n        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)\n        return x * tmp\n\n\nclass ApplyStyle(nn.Module):\n    \"\"\"\n    @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb\n    \"\"\"\n\n    def __init__(self, latent_size, channels):\n        super(ApplyStyle, self).__init__()\n        self.linear = nn.Linear(latent_size, channels * 2)\n\n    def forward(self, x, latent):\n        style = self.linear(latent)  # style => [batch_size, n_channels*2]\n        shape = [-1, 2, x.size(1), 1, 1]\n        style = style.view(shape)  # [batch_size, 2, n_channels, ...]\n        x = x * (style[:, 0] * 1 + 1.0) + style[:, 1] * 1\n        return x\n\n\nclass ResnetBlock_Adain(nn.Module):\n    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):\n        super(ResnetBlock_Adain, self).__init__()\n\n        p = 0\n        conv1 = []\n        if padding_type == \"reflect\":\n            conv1 += [nn.ReflectionPad2d(1)]\n        elif padding_type == \"replicate\":\n            conv1 += [nn.ReplicationPad2d(1)]\n        elif padding_type == \"zero\":\n            p = 1\n        else:\n            raise NotImplementedError(\"padding [%s] is not implemented\" % padding_type)\n        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]\n        self.conv1 = nn.Sequential(*conv1)\n        self.style1 = ApplyStyle(latent_size, dim)\n        self.act1 = activation\n\n        p = 0\n        conv2 = []\n        if padding_type == \"reflect\":\n            conv2 += [nn.ReflectionPad2d(1)]\n        elif padding_type == \"replicate\":\n            conv2 += [nn.ReplicationPad2d(1)]\n        elif padding_type == \"zero\":\n            p = 1\n        else:\n            raise NotImplementedError(\"padding [%s] is not implemented\" % padding_type)\n        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]\n        self.conv2 = nn.Sequential(*conv2)\n        self.style2 = ApplyStyle(latent_size, dim)\n\n    def forward(self, x, dlatents_in_slice):\n        y = self.conv1(x)\n        y = self.style1(y, dlatents_in_slice)\n        y = self.act1(y)\n        y = self.conv2(y)\n        y = self.style2(y, dlatents_in_slice)\n        out = x + y\n        return out\n\n\nclass Generator_Adain_Upsample(nn.Module):\n    def __init__(\n        self,\n        input_nc,\n        output_nc,\n        latent_size,\n        n_blocks=6,\n        deep=False,\n        norm_layer=nn.BatchNorm2d,\n        padding_type=\"reflect\",\n    ):\n        assert n_blocks >= 0\n        
super(Generator_Adain_Upsample, self).__init__()\n        activation = nn.ReLU(True)\n        self.deep = deep\n\n        self.first_layer = nn.Sequential(\n            nn.ReflectionPad2d(3),\n            nn.Conv2d(input_nc, 32, kernel_size=7, padding=0),\n            norm_layer(32),\n            activation,\n        )\n        # downsample\n        self.down0 = nn.Sequential(\n            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),\n            norm_layer(64),\n            activation,\n        )\n        self.down1 = nn.Sequential(\n            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),\n            norm_layer(128),\n            activation,\n        )\n        self.down2 = nn.Sequential(\n            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),\n            norm_layer(256),\n            activation,\n        )\n        self.down3 = nn.Sequential(\n            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),\n            norm_layer(512),\n            activation,\n        )\n        if self.deep:\n            self.down4 = nn.Sequential(\n                nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),\n                norm_layer(512),\n                activation,\n            )\n\n        # resnet blocks\n        BN = []\n        for i in range(n_blocks):\n            BN += [\n                ResnetBlock_Adain(\n                    512,\n                    latent_size=latent_size,\n                    padding_type=padding_type,\n                    activation=activation,\n                )\n            ]\n        self.BottleNeck = nn.Sequential(*BN)\n\n        if self.deep:\n            self.up4 = nn.Sequential(\n                nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),\n                nn.BatchNorm2d(512),\n                activation,\n            )\n        self.up3 = nn.Sequential(\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(256),\n            activation,\n        )\n        self.up2 = nn.Sequential(\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(128),\n            activation,\n        )\n        self.up1 = nn.Sequential(\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(64),\n            activation,\n        )\n        self.up0 = nn.Sequential(\n            nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),\n            nn.BatchNorm2d(32),\n            activation,\n        )\n        self.last_layer = nn.Sequential(\n            nn.ReflectionPad2d(3),\n            nn.Conv2d(32, output_nc, kernel_size=7, padding=0),\n            nn.Tanh(),\n        )\n\n    def forward(self, input, dlatents):\n        x = input  # 3*224*224\n\n        skip0 = self.first_layer(x)\n        skip1 = self.down0(skip0)\n        skip2 = self.down1(skip1)\n        skip3 = self.down2(skip2)\n        if self.deep:\n            skip4 = self.down3(skip3)\n            x = self.down4(skip4)\n        else:\n            x = self.down3(skip3)\n\n        for i in range(len(self.BottleNeck)):\n            x = self.BottleNeck[i](x, dlatents)\n\n        if self.deep:\n            x = self.up4(x)\n       
 x = self.up3(x)\n        x = self.up2(x)\n        x = self.up1(x)\n        x = self.up0(x)\n        x = self.last_layer(x)\n        x = (x + 1) / 2\n\n        return x\n"
  },
  {
    "path": "src/dot/simswap/models/models.py",
    "content": "#!/usr/bin/env python3\n\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.nn import Parameter\n\nfrom dot.simswap.parsing_model.resnet import conv3x3\n\n\nclass SEBlock(nn.Module):\n    def __init__(self, channel, reduction=16):\n        super(SEBlock, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Sequential(\n            nn.Linear(channel, channel // reduction),\n            nn.PReLU(),\n            nn.Linear(channel // reduction, channel),\n            nn.Sigmoid(),\n        )\n\n    def forward(self, x):\n        b, c, _, _ = x.size()\n        y = self.avg_pool(x).view(b, c)\n        y = self.fc(y).view(b, c, 1, 1)\n        return x * y\n\n\n# Todo Can this be removed?\nclass IRBlock(nn.Module):\n    expansion = 1\n\n    def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):\n        super(IRBlock, self).__init__()\n        self.bn0 = nn.BatchNorm2d(inplanes)\n        self.conv1 = conv3x3(inplanes, inplanes)\n        self.bn1 = nn.BatchNorm2d(inplanes)\n        self.prelu = nn.PReLU()\n        self.conv2 = conv3x3(inplanes, planes, stride)\n        self.bn2 = nn.BatchNorm2d(planes)\n        self.downsample = downsample\n        self.stride = stride\n        self.use_se = use_se\n        if self.use_se:\n            self.se = SEBlock(planes)\n\n    def forward(self, x):\n        residual = x\n        out = self.bn0(x)\n        out = self.conv1(out)\n        out = self.bn1(out)\n        out = self.prelu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        if self.use_se:\n            out = self.se(out)\n\n        if self.downsample is not None:\n            residual = self.downsample(x)\n\n        out += residual\n        out = self.prelu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self, block, layers, use_se=True):\n        self.inplanes = 64\n        self.use_se = use_se\n        super(ResNet, self).__init__()\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)\n        self.bn1 = nn.BatchNorm2d(64)\n        self.prelu = nn.PReLU()\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)\n        self.layer1 = self._make_layer(block, 64, layers[0])\n        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n        self.bn2 = nn.BatchNorm2d(512)\n        self.dropout = nn.Dropout()\n        self.fc = nn.Linear(512 * 7 * 7, 512)\n        self.bn3 = nn.BatchNorm1d(512)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.xavier_normal_(m.weight)\n            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.Linear):\n                nn.init.xavier_normal_(m.weight)\n                nn.init.constant_(m.bias, 0)\n\n    def _make_layer(self, block, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.Conv2d(\n                    self.inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False,\n           
     ),\n                nn.BatchNorm2d(planes * block.expansion),\n            )\n\n        layers = []\n        layers.append(\n            block(self.inplanes, planes, stride, downsample, use_se=self.use_se)\n        )\n        self.inplanes = planes\n        for i in range(1, blocks):\n            layers.append(block(self.inplanes, planes, use_se=self.use_se))\n\n        return nn.Sequential(*layers)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.prelu(x)\n        x = self.maxpool(x)\n\n        x = self.layer1(x)\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        x = self.bn2(x)\n        x = self.dropout(x)\n        x = x.view(x.size(0), -1)\n        x = self.fc(x)\n        x = self.bn3(x)\n\n        return x\n\n\nclass ArcMarginModel(nn.Module):\n    def __init__(self, args):\n        super(ArcMarginModel, self).__init__()\n\n        num_classes = 93431\n        self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))\n        nn.init.xavier_uniform_(self.weight)\n\n        self.easy_margin = args.easy_margin\n        self.m = args.margin_m\n        self.s = args.margin_s\n\n        self.cos_m = math.cos(self.m)\n        self.sin_m = math.sin(self.m)\n        self.th = math.cos(math.pi - self.m)\n        self.mm = math.sin(math.pi - self.m) * self.m\n\n    def forward(self, input, label):\n        x = F.normalize(input)\n        W = F.normalize(self.weight)\n        cosine = F.linear(x, W)\n        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n        phi = cosine * self.cos_m - sine * self.sin_m  # cos(theta + m)\n        if self.easy_margin:\n            phi = torch.where(cosine > 0, phi, cosine)\n        else:\n            phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n\n        device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n        one_hot = torch.zeros(cosine.size(), device=device)\n        one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)\n        output *= self.s\n        return output\n"
  },
  {
    "path": "src/dot/simswap/option.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom PIL import Image\nfrom torchvision import transforms\n\nfrom dot.commons import ModelOption\nfrom dot.simswap.fs_model import create_model\nfrom dot.simswap.mediapipe.face_mesh import FaceMesh\nfrom dot.simswap.parsing_model.model import BiSeNet\nfrom dot.simswap.util.norm import SpecificNorm\nfrom dot.simswap.util.reverse2original import reverse2wholeimage\nfrom dot.simswap.util.util import _totensor\n\n\nclass SimswapOption(ModelOption):\n    \"\"\"Extends `ModelOption` and initializes models.\"\"\"\n\n    def __init__(\n        self,\n        use_gpu=True,\n        use_mask=False,\n        crop_size=224,\n        gpen_type=None,\n        gpen_path=None,\n    ):\n        super(SimswapOption, self).__init__(\n            gpen_type=gpen_type,\n            use_gpu=use_gpu,\n            crop_size=crop_size,\n            gpen_path=gpen_path,\n        )\n        self.use_mask = use_mask\n\n    def create_model(  # type: ignore\n        self,\n        detection_threshold=0.6,\n        det_size=(640, 640),\n        opt_verbose=False,\n        opt_crop_size=224,\n        opt_gpu_ids=[0],\n        opt_fp16=False,\n        checkpoints_dir=\"./checkpoints\",\n        opt_name=\"people\",\n        opt_resize_or_crop=\"scale_width\",\n        opt_load_pretrain=\"\",\n        opt_which_epoch=\"latest\",\n        opt_continue_train=\"store_true\",\n        parsing_model_path=\"./parsing_model/checkpoint/79999_iter.pth\",\n        arcface_model_path=\"./arcface_model/arcface_checkpoint.tar\",\n        **kwargs\n    ) -> None:\n        # preprocess_f\n        self.transformer_Arcface = transforms.Compose(\n            [\n                transforms.ToTensor(),\n                transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n            ]\n        )\n\n        if opt_crop_size == 512:\n            opt_which_epoch = 550000\n            opt_name = \"512\"\n            self.mode = \"ffhq\"\n        else:\n            self.mode = \"None\"\n\n        self.detect_model = FaceMesh(\n            static_image_mode=True,\n            max_num_faces=2,\n            refine_landmarks=True,\n            min_detection_confidence=0.5,\n            mode=self.mode,\n        )\n\n        # Tod check if we need this\n        self.spNorm = SpecificNorm(use_gpu=self.use_gpu)\n        if self.use_mask:\n            n_classes = 19\n            self.net = BiSeNet(n_classes=n_classes)\n            if self.use_gpu:\n                device = \"mps\" if torch.backends.mps.is_available() else \"cuda\"\n                self.net.to(device)\n                self.net.load_state_dict(\n                    torch.load(parsing_model_path, map_location=device)\n                )\n            else:\n                self.net.cpu()\n                self.net.load_state_dict(\n                    torch.load(parsing_model_path, map_location=torch.device(\"cpu\"))\n                )\n\n            self.net.eval()\n        else:\n            self.net = None\n\n        torch.nn.Module.dump_patches = False\n\n        # Model\n        self.model = create_model(\n            opt_verbose,\n            opt_crop_size,\n            opt_fp16,\n            opt_gpu_ids,\n            checkpoints_dir,\n            opt_name,\n            opt_resize_or_crop,\n            opt_load_pretrain,\n            opt_which_epoch,\n            opt_continue_train,\n            arcface_model_path,\n            use_gpu=self.use_gpu,\n      
  )\n        self.model.eval()\n\n    def change_option(self, image: np.array, **kwargs) -> None:\n        \"\"\"Sets the source image in source/target pair face-swap.\n\n        Args:\n            image (np.array): Source image.\n        \"\"\"\n        img_a_align_crop, _ = self.detect_model.get(image, self.crop_size)\n        img_a_align_crop_pil = Image.fromarray(\n            cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB)\n        )\n        img_a = self.transformer_Arcface(img_a_align_crop_pil)\n        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])\n\n        # convert numpy to tensor\n        if self.use_gpu:\n            img_id = (\n                img_id.to(\"mps\")\n                if torch.backends.mps.is_available()\n                else img_id.to(\"cuda\")\n            )\n        else:\n            img_id = img_id.cpu()\n\n        # create latent id\n        img_id_downsample = F.interpolate(img_id, size=(112, 112))\n        source_image = self.model.netArc(img_id_downsample)\n        source_image = source_image.detach().to(\"cpu\")\n        source_image = source_image / np.linalg.norm(\n            source_image, axis=1, keepdims=True\n        )\n\n        source_image = (\n            source_image.to(\"mps\" if torch.backends.mps.is_available() else \"cuda\")\n            if self.use_gpu\n            else source_image.to(\"cpu\")\n        )\n        self.source_image = source_image\n\n    def process_image(self, image: np.array, **kwargs) -> np.array:\n        \"\"\"Main process of simswap method. There are 3 main steps:\n        * face detection and alignment of target image.\n        * swap with `self.source_image`.\n        * face segmentation and reverse to whole image.\n\n        Args:\n            image (np.array): Target frame where face from `self.source_image` will be swapped with.\n\n        Returns:\n            np.array: Resulted face-swap image\n        \"\"\"\n\n        detect_results = self.detect_model.get(image, self.crop_size)\n        if detect_results is not None:\n            frame_align_crop_list = detect_results[0]\n            frame_mat_list = detect_results[1]\n            swap_result_list = []\n            frame_align_crop_tenor_list = []\n            for frame_align_crop in frame_align_crop_list:\n                if self.use_gpu:\n                    frame_align_crop_tenor = _totensor(\n                        cv2.cvtColor(frame_align_crop, cv2.COLOR_BGR2RGB)\n                    )[None, ...].to(\n                        \"mps\" if torch.backends.mps.is_available() else \"cuda\"\n                    )\n                else:\n                    frame_align_crop_tenor = _totensor(\n                        cv2.cvtColor(frame_align_crop, cv2.COLOR_BGR2RGB)\n                    )[None, ...].cpu()\n\n                swap_result = self.model(\n                    None, frame_align_crop_tenor, self.source_image, None, True\n                )[0]\n                swap_result_list.append(swap_result)\n                frame_align_crop_tenor_list.append(frame_align_crop_tenor)\n\n            result_frame = reverse2wholeimage(\n                frame_align_crop_tenor_list,\n                swap_result_list,\n                frame_mat_list,\n                self.crop_size,\n                image,\n                pasring_model=self.net,\n                use_mask=self.use_mask,\n                norm=self.spNorm,\n                use_gpu=self.use_gpu,\n                use_cam=kwargs.get(\"use_cam\", True),\n            )\n            
return result_frame\n        else:\n            return image\n"
  },
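The `SimswapOption` entry above wires face detection, ArcFace identity extraction, and the SimSwap generator into a create_model → change_option → process_image flow. Below is a minimal, hedged usage sketch; the `dot.simswap.option` import path, the image filenames, and the presence of downloaded SimSwap/ArcFace checkpoints are assumptions, while the keyword arguments mirror the defaults shown in `create_model`.

```python
# Hypothetical end-to-end sketch for SimswapOption (CPU, no segmentation mask).
# Assumes: the class is importable from dot.simswap.option, the checkpoints
# below are already downloaded, and source_face.jpg / target_frame.jpg exist.
import cv2

from dot.simswap.option import SimswapOption  # import path is an assumption

option = SimswapOption(use_gpu=False, use_mask=False, crop_size=224)
option.create_model(
    opt_crop_size=224,
    checkpoints_dir="./checkpoints",
    parsing_model_path="./parsing_model/checkpoint/79999_iter.pth",
    arcface_model_path="./arcface_model/arcface_checkpoint.tar",
)

source = cv2.imread("source_face.jpg")    # identity to swap in (hypothetical file)
target = cv2.imread("target_frame.jpg")   # frame to swap onto (hypothetical file)

option.change_option(source)                           # build the source latent id
swapped = option.process_image(target, use_cam=False)  # BGR uint8 result frame
cv2.imwrite("swapped.jpg", swapped)
```

Here `use_cam=False` selects the image/video post-processing branch of `reverse2wholeimage`; leaving it at the default keeps the camera-oriented erosion path.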
  {
    "path": "src/dot/simswap/parsing_model/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/simswap/parsing_model/model.py",
    "content": "#!/usr/bin/python\r\n# -*- encoding: utf-8 -*-\r\n\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom .resnet import Resnet18\r\n\r\n\r\nclass ConvBNReLU(nn.Module):\r\n    def __init__(self, in_chan, out_chan, ks=3, stride=1, padding=1, *args, **kwargs):\r\n        super(ConvBNReLU, self).__init__()\r\n        self.conv = nn.Conv2d(\r\n            in_chan,\r\n            out_chan,\r\n            kernel_size=ks,\r\n            stride=stride,\r\n            padding=padding,\r\n            bias=False,\r\n        )\r\n        self.bn = nn.BatchNorm2d(out_chan)\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        x = self.conv(x)\r\n        x = F.relu(self.bn(x))\r\n        return x\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n\r\nclass BiSeNetOutput(nn.Module):\r\n    def __init__(self, in_chan, mid_chan, n_classes, *args, **kwargs):\r\n        super(BiSeNetOutput, self).__init__()\r\n        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)\r\n        self.conv_out = nn.Conv2d(mid_chan, n_classes, kernel_size=1, bias=False)\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        x = self.conv(x)\r\n        x = self.conv_out(x)\r\n        return x\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n    def get_params(self):\r\n        wd_params, nowd_params = [], []\r\n        for name, module in self.named_modules():\r\n            if isinstance(module, nn.Linear) or isinstance(module, nn.Conv2d):\r\n                wd_params.append(module.weight)\r\n                if module.bias is not None:\r\n                    nowd_params.append(module.bias)\r\n            elif isinstance(module, nn.BatchNorm2d):\r\n                nowd_params += list(module.parameters())\r\n        return wd_params, nowd_params\r\n\r\n\r\nclass AttentionRefinementModule(nn.Module):\r\n    def __init__(self, in_chan, out_chan, *args, **kwargs):\r\n        super(AttentionRefinementModule, self).__init__()\r\n        self.conv = ConvBNReLU(in_chan, out_chan, ks=3, stride=1, padding=1)\r\n        self.conv_atten = nn.Conv2d(out_chan, out_chan, kernel_size=1, bias=False)\r\n        self.bn_atten = nn.BatchNorm2d(out_chan)\r\n        self.sigmoid_atten = nn.Sigmoid()\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        feat = self.conv(x)\r\n        atten = F.avg_pool2d(feat, feat.size()[2:])\r\n        atten = self.conv_atten(atten)\r\n        atten = self.bn_atten(atten)\r\n        atten = self.sigmoid_atten(atten)\r\n        out = torch.mul(feat, atten)\r\n        return out\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n\r\nclass ContextPath(nn.Module):\r\n    def __init__(self, *args, **kwargs):\r\n        super(ContextPath, self).__init__()\r\n        self.resnet = Resnet18()\r\n        self.arm16 = 
AttentionRefinementModule(256, 128)\r\n        self.arm32 = AttentionRefinementModule(512, 128)\r\n        self.conv_head32 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\r\n        self.conv_head16 = ConvBNReLU(128, 128, ks=3, stride=1, padding=1)\r\n        self.conv_avg = ConvBNReLU(512, 128, ks=1, stride=1, padding=0)\r\n\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        H0, W0 = x.size()[2:]\r\n        feat8, feat16, feat32 = self.resnet(x)\r\n        H8, W8 = feat8.size()[2:]\r\n        H16, W16 = feat16.size()[2:]\r\n        H32, W32 = feat32.size()[2:]\r\n\r\n        avg = F.avg_pool2d(feat32, feat32.size()[2:])\r\n        avg = self.conv_avg(avg)\r\n        avg_up = F.interpolate(avg, (H32, W32), mode=\"nearest\")\r\n\r\n        feat32_arm = self.arm32(feat32)\r\n        feat32_sum = feat32_arm + avg_up\r\n        feat32_up = F.interpolate(feat32_sum, (H16, W16), mode=\"nearest\")\r\n        feat32_up = self.conv_head32(feat32_up)\r\n\r\n        feat16_arm = self.arm16(feat16)\r\n        feat16_sum = feat16_arm + feat32_up\r\n        feat16_up = F.interpolate(feat16_sum, (H8, W8), mode=\"nearest\")\r\n        feat16_up = self.conv_head16(feat16_up)\r\n\r\n        return feat8, feat16_up, feat32_up  # x8, x8, x16\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n    def get_params(self):\r\n        wd_params, nowd_params = [], []\r\n        for name, module in self.named_modules():\r\n            if isinstance(module, (nn.Linear, nn.Conv2d)):\r\n                wd_params.append(module.weight)\r\n                if module.bias is not None:\r\n                    nowd_params.append(module.bias)\r\n            elif isinstance(module, nn.BatchNorm2d):\r\n                nowd_params += list(module.parameters())\r\n        return wd_params, nowd_params\r\n\r\n\r\nclass FeatureFusionModule(nn.Module):\r\n    def __init__(self, in_chan, out_chan, *args, **kwargs):\r\n        super(FeatureFusionModule, self).__init__()\r\n        self.convblk = ConvBNReLU(in_chan, out_chan, ks=1, stride=1, padding=0)\r\n        self.conv1 = nn.Conv2d(\r\n            out_chan, out_chan // 4, kernel_size=1, stride=1, padding=0, bias=False\r\n        )\r\n        self.conv2 = nn.Conv2d(\r\n            out_chan // 4, out_chan, kernel_size=1, stride=1, padding=0, bias=False\r\n        )\r\n        self.relu = nn.ReLU(inplace=True)\r\n        self.sigmoid = nn.Sigmoid()\r\n        self.init_weight()\r\n\r\n    def forward(self, fsp, fcp):\r\n        fcat = torch.cat([fsp, fcp], dim=1)\r\n        feat = self.convblk(fcat)\r\n        atten = F.avg_pool2d(feat, feat.size()[2:])\r\n        atten = self.conv1(atten)\r\n        atten = self.relu(atten)\r\n        atten = self.conv2(atten)\r\n        atten = self.sigmoid(atten)\r\n        feat_atten = torch.mul(feat, atten)\r\n        feat_out = feat_atten + feat\r\n        return feat_out\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n    def get_params(self):\r\n        wd_params, nowd_params = [], []\r\n        for name, module in self.named_modules():\r\n            if isinstance(module, 
nn.Linear) or isinstance(module, nn.Conv2d):\r\n                wd_params.append(module.weight)\r\n                if module.bias is not None:\r\n                    nowd_params.append(module.bias)\r\n            elif isinstance(module, nn.BatchNorm2d):\r\n                nowd_params += list(module.parameters())\r\n        return wd_params, nowd_params\r\n\r\n\r\nclass BiSeNet(nn.Module):\r\n    def __init__(self, n_classes, *args, **kwargs):\r\n        super(BiSeNet, self).__init__()\r\n        self.cp = ContextPath()\r\n        # here self.sp is deleted\r\n        self.ffm = FeatureFusionModule(256, 256)\r\n        self.conv_out = BiSeNetOutput(256, 256, n_classes)\r\n        self.conv_out16 = BiSeNetOutput(128, 64, n_classes)\r\n        self.conv_out32 = BiSeNetOutput(128, 64, n_classes)\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        H, W = x.size()[2:]\r\n        # here return res3b1 feature\r\n        feat_res8, feat_cp8, feat_cp16 = self.cp(x)\r\n        # use res3b1 feature to replace spatial path feature\r\n        feat_sp = feat_res8\r\n        feat_fuse = self.ffm(feat_sp, feat_cp8)\r\n\r\n        feat_out = self.conv_out(feat_fuse)\r\n        feat_out16 = self.conv_out16(feat_cp8)\r\n        feat_out32 = self.conv_out32(feat_cp16)\r\n\r\n        feat_out = F.interpolate(feat_out, (H, W), mode=\"bilinear\", align_corners=True)\r\n        feat_out16 = F.interpolate(\r\n            feat_out16, (H, W), mode=\"bilinear\", align_corners=True\r\n        )\r\n        feat_out32 = F.interpolate(\r\n            feat_out32, (H, W), mode=\"bilinear\", align_corners=True\r\n        )\r\n        return feat_out, feat_out16, feat_out32\r\n\r\n    def init_weight(self):\r\n        for ly in self.children():\r\n            if isinstance(ly, nn.Conv2d):\r\n                nn.init.kaiming_normal_(ly.weight, a=1)\r\n                if ly.bias is not None:\r\n                    nn.init.constant_(ly.bias, 0)\r\n\r\n    def get_params(self):\r\n        wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params = [], [], [], []\r\n        for name, child in self.named_children():\r\n            child_wd_params, child_nowd_params = child.get_params()\r\n            if isinstance(child, FeatureFusionModule) or isinstance(\r\n                child, BiSeNetOutput\r\n            ):\r\n\r\n                lr_mul_wd_params += child_wd_params\r\n                lr_mul_nowd_params += child_nowd_params\r\n            else:\r\n                wd_params += child_wd_params\r\n                nowd_params += child_nowd_params\r\n        return wd_params, nowd_params, lr_mul_wd_params, lr_mul_nowd_params\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    net = BiSeNet(19)\r\n    net.cuda()\r\n    net.eval()\r\n    in_ten = torch.randn(16, 3, 640, 480).cuda()\r\n    out, out16, out32 = net(in_ten)\r\n    print(out.shape)\r\n\r\n    net.get_params()\r\n"
  },
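For clarity, here is a minimal sketch of how the `BiSeNet` face-parsing head above is typically queried: the main output is argmax-ed into a per-pixel label map, the same step `reverse2wholeimage` applies before building the face/mouth masks. It assumes the ResNet-18 weights expected at `saved_models/simswap/resnet18-5c106cde.pth` are already in place, since constructing `BiSeNet` loads them.

```python
# Minimal sketch: turn BiSeNet's main output into a per-pixel label map.
# Assumes saved_models/simswap/resnet18-5c106cde.pth exists on disk, because
# the Resnet18 backbone loads it when BiSeNet is constructed.
import torch

from dot.simswap.parsing_model.model import BiSeNet

net = BiSeNet(n_classes=19)   # 19 face-parsing classes, as used above
net.eval()

with torch.no_grad():
    x = torch.randn(1, 3, 512, 512)            # dummy 512x512 face crop, NCHW
    feat_out, feat_out16, feat_out32 = net(x)  # each head is (1, 19, 512, 512)
    parsing = feat_out.squeeze(0).argmax(0)    # (512, 512) map of class ids 0..18

print(parsing.shape, parsing.unique())
```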
  {
    "path": "src/dot/simswap/parsing_model/resnet.py",
    "content": "#!/usr/bin/python\r\n# -*- encoding: utf-8 -*-\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nresnet18_url = \"saved_models/simswap/resnet18-5c106cde.pth\"\r\n\r\n\r\ndef conv3x3(in_planes, out_planes, stride=1):\r\n    \"\"\"3x3 convolution with padding\"\"\"\r\n    return nn.Conv2d(\r\n        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False\r\n    )\r\n\r\n\r\nclass BasicBlock(nn.Module):\r\n    def __init__(self, in_chan, out_chan, stride=1):\r\n        super(BasicBlock, self).__init__()\r\n        self.conv1 = conv3x3(in_chan, out_chan, stride)\r\n        self.bn1 = nn.BatchNorm2d(out_chan)\r\n        self.conv2 = conv3x3(out_chan, out_chan)\r\n        self.bn2 = nn.BatchNorm2d(out_chan)\r\n        self.relu = nn.ReLU(inplace=True)\r\n        self.downsample = None\r\n        if in_chan != out_chan or stride != 1:\r\n            self.downsample = nn.Sequential(\r\n                nn.Conv2d(in_chan, out_chan, kernel_size=1, stride=stride, bias=False),\r\n                nn.BatchNorm2d(out_chan),\r\n            )\r\n\r\n    def forward(self, x):\r\n        residual = self.conv1(x)\r\n        residual = F.relu(self.bn1(residual))\r\n        residual = self.conv2(residual)\r\n        residual = self.bn2(residual)\r\n\r\n        shortcut = x\r\n        if self.downsample is not None:\r\n            shortcut = self.downsample(x)\r\n\r\n        out = shortcut + residual\r\n        out = self.relu(out)\r\n        return out\r\n\r\n\r\ndef create_layer_basic(in_chan, out_chan, bnum, stride=1):\r\n    layers = [BasicBlock(in_chan, out_chan, stride=stride)]\r\n    for i in range(bnum - 1):\r\n        layers.append(BasicBlock(out_chan, out_chan, stride=1))\r\n    return nn.Sequential(*layers)\r\n\r\n\r\nclass Resnet18(nn.Module):\r\n    def __init__(self):\r\n        super(Resnet18, self).__init__()\r\n        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\r\n        self.bn1 = nn.BatchNorm2d(64)\r\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\r\n        self.layer1 = create_layer_basic(64, 64, bnum=2, stride=1)\r\n        self.layer2 = create_layer_basic(64, 128, bnum=2, stride=2)\r\n        self.layer3 = create_layer_basic(128, 256, bnum=2, stride=2)\r\n        self.layer4 = create_layer_basic(256, 512, bnum=2, stride=2)\r\n        self.init_weight()\r\n\r\n    def forward(self, x):\r\n        x = self.conv1(x)\r\n        x = F.relu(self.bn1(x))\r\n        x = self.maxpool(x)\r\n\r\n        x = self.layer1(x)\r\n        feat8 = self.layer2(x)  # 1/8\r\n        feat16 = self.layer3(feat8)  # 1/16\r\n        feat32 = self.layer4(feat16)  # 1/32\r\n        return feat8, feat16, feat32\r\n\r\n    def init_weight(self):\r\n        state_dict = torch.load(resnet18_url, map_location=None, weights_only=False)\r\n        self_state_dict = self.state_dict()\r\n        for k, v in state_dict.items():\r\n            if \"fc\" in k:\r\n                continue\r\n            self_state_dict.update({k: v})\r\n        self.load_state_dict(self_state_dict)\r\n\r\n    def get_params(self):\r\n        wd_params, nowd_params = [], []\r\n        for name, module in self.named_modules():\r\n            if isinstance(module, (nn.Linear, nn.Conv2d)):\r\n                wd_params.append(module.weight)\r\n                if module.bias is not None:\r\n                    nowd_params.append(module.bias)\r\n            elif isinstance(module, nn.BatchNorm2d):\r\n                
nowd_params += list(module.parameters())\r\n        return wd_params, nowd_params\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    net = Resnet18()\r\n    x = torch.randn(16, 3, 224, 224)\r\n    out = net(x)\r\n    print(out[0].size())\r\n    print(out[1].size())\r\n    print(out[2].size())\r\n    net.get_params()\r\n"
  },
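`Resnet18.init_weight()` above loads its weights from the local path `saved_models/simswap/resnet18-5c106cde.pth` rather than downloading them. The following is a hedged setup sketch, not a project-provided script, that fetches the matching torchvision checkpoint to that location, assuming the standard PyTorch model-zoo URL for that file name.

```python
# Assumed setup step: download the torchvision ResNet-18 checkpoint that
# Resnet18.init_weight() expects at saved_models/simswap/resnet18-5c106cde.pth.
# The URL below is the standard PyTorch model-zoo location for that file.
import os

import torch

URL = "https://download.pytorch.org/models/resnet18-5c106cde.pth"
DEST = "saved_models/simswap/resnet18-5c106cde.pth"

os.makedirs(os.path.dirname(DEST), exist_ok=True)
state_dict = torch.hub.load_state_dict_from_url(URL, progress=True)
torch.save(state_dict, DEST)
print(f"saved {len(state_dict)} tensors to {DEST}")
```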
  {
    "path": "src/dot/simswap/util/__init__.py",
    "content": "#!/usr/bin/env python3\n"
  },
  {
    "path": "src/dot/simswap/util/norm.py",
    "content": "#!/usr/bin/env python3\r\n\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass SpecificNorm(nn.Module):\r\n    def __init__(self, epsilon=1e-8, use_gpu=True):\r\n        \"\"\"\r\n        @notice: avoid in-place ops.\r\n        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3\r\n        \"\"\"\r\n        super(SpecificNorm, self).__init__()\r\n        self.mean = np.array([0.485, 0.456, 0.406])\r\n        if use_gpu:\r\n            self.mean = (\r\n                torch.from_numpy(self.mean)\r\n                .float()\r\n                .to(\"mps\" if torch.backends.mps.is_available() else \"cuda\")\r\n            )\r\n        else:\r\n            self.mean = torch.from_numpy(self.mean).float().cpu()\r\n\r\n        self.mean = self.mean.view([1, 3, 1, 1])\r\n\r\n        self.std = np.array([0.229, 0.224, 0.225])\r\n        if use_gpu:\r\n            self.std = (\r\n                torch.from_numpy(self.std)\r\n                .float()\r\n                .to(\"mps\" if torch.backends.mps.is_available() else \"cuda\")\r\n            )\r\n        else:\r\n            self.std = torch.from_numpy(self.std).float().cpu()\r\n\r\n        self.std = self.std.view([1, 3, 1, 1])\r\n\r\n    def forward(self, x, use_gpu=True):\r\n        mean = self.mean.expand([1, 3, x.shape[2], x.shape[3]])\r\n        std = self.std.expand([1, 3, x.shape[2], x.shape[3]])\r\n\r\n        if use_gpu:\r\n            x = (x - mean) / std\r\n        else:\r\n            x = (x - mean.detach().to(\"cpu\")) / std.detach().to(\"cpu\")\r\n\r\n        return x\r\n"
  },
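`SpecificNorm` simply re-applies the same ImageNet mean/std used by the ArcFace preprocessing transform, broadcast over the spatial dimensions. A minimal CPU-only sketch of its behaviour on a dummy batch:

```python
# Minimal CPU sketch of SpecificNorm: (x - mean) / std per channel, using the
# ImageNet statistics shared with the ArcFace transforms.Normalize call.
import torch

from dot.simswap.util.norm import SpecificNorm

norm = SpecificNorm(use_gpu=False)
img = torch.rand(1, 3, 224, 224)     # dummy image batch, values in [0, 1]
out = norm(img, use_gpu=False)       # channel-wise normalized tensor
print(out.shape, out.mean().item())
```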
  {
    "path": "src/dot/simswap/util/reverse2original.py",
    "content": "#!/usr/bin/env python3\n\nimport cv2\nimport kornia as K\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom kornia.geometry import transform as ko_transform\nfrom torch.nn import functional as F\n\n\ndef isin(ar1, ar2):\n    return (ar1[..., None] == ar2).any(-1)\n\n\ndef encode_segmentation_rgb(segmentation, device, no_neck=True):\n    parse = segmentation\n\n    face_part_ids = (\n        [1, 2, 3, 4, 5, 6, 10, 12, 13]\n        if no_neck\n        else [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14]\n    )\n    mouth_id = [11]\n\n    face_map = (\n        isin(\n            parse,\n            torch.tensor(face_part_ids).to(device),\n        )\n        * 255.0\n    ).to(device)\n    mouth_map = (\n        isin(\n            parse,\n            torch.tensor(mouth_id).to(device),\n        )\n        * 255.0\n    ).to(device)\n    mask_stack = torch.stack((face_map, mouth_map), axis=2)\n\n    mask_out = torch.zeros([2, parse.shape[0], parse.shape[1]]).to(device)\n    mask_out[0, :, :] = mask_stack[:, :, 0]\n    mask_out[1, :, :] = mask_stack[:, :, 1]\n\n    return mask_out\n\n\nclass SoftErosion(nn.Module):\n    def __init__(self, kernel_size=15, threshold=0.6, iterations=1):\n        super(SoftErosion, self).__init__()\n        r = kernel_size // 2\n        self.padding = r\n        self.iterations = iterations\n        self.threshold = threshold\n\n        # Create kernel\n        y_indices, x_indices = torch.meshgrid(\n            torch.arange(0.0, kernel_size),\n            torch.arange(0.0, kernel_size),\n            indexing=\"xy\",\n        )\n        dist = torch.sqrt((x_indices - r) ** 2 + (y_indices - r) ** 2)\n        kernel = dist.max() - dist\n        kernel /= kernel.sum()\n        kernel = kernel.view(1, 1, *kernel.shape)\n        self.register_buffer(\"weight\", kernel)\n\n    def forward(self, x):\n        x = x.float()\n        for i in range(self.iterations - 1):\n            x = torch.min(\n                x,\n                F.conv2d(\n                    x, weight=self.weight, groups=x.shape[1], padding=self.padding\n                ),\n            )\n        x = F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding)\n\n        mask = x >= self.threshold\n        x[mask] = 1.0\n        x[~mask] /= x[~mask].max()\n\n        return x, mask\n\n\ndef postprocess(swapped_face, target, target_mask, smooth_mask, device):\n\n    target_mask /= 255.0\n\n    face_mask_tensor = target_mask[0] + target_mask[1]\n\n    soft_face_mask_tensor, _ = smooth_mask(face_mask_tensor.unsqueeze_(0).unsqueeze_(0))\n    soft_face_mask_tensor.squeeze_()\n\n    soft_face_mask_tensor = soft_face_mask_tensor[None, :, :]\n\n    result = swapped_face * soft_face_mask_tensor + target * (1 - soft_face_mask_tensor)\n\n    return result\n\n\ndef reverse2wholeimage(\n    b_align_crop_tenor_list,\n    swaped_imgs,\n    mats,\n    crop_size,\n    oriimg,\n    pasring_model=None,\n    norm=None,\n    use_mask=True,\n    use_gpu=True,\n    use_cam=True,\n):\n\n    device = torch.device(\n        (\"mps\" if torch.backends.mps.is_available() else \"cuda\") if use_gpu else \"cpu\"\n    )\n    if use_mask:\n        smooth_mask = SoftErosion(kernel_size=17, threshold=0.9, iterations=7).to(\n            device\n        )\n\n    img = K.utils.image_to_tensor(oriimg).float().to(device)\n    img /= 255.0\n    kernel_use_cam = torch.ones(5, 5).to(device)\n    kernel_use_image = np.ones((40, 40), np.uint8)\n    orisize = (oriimg.shape[0], oriimg.shape[1])\n    mat_rev_initial = 
np.ones([3, 3])\n    mat_rev_initial[2, :] = np.array([0.0, 0.0, 1.0])\n    for swaped_img, mat, source_img in zip(swaped_imgs, mats, b_align_crop_tenor_list):\n\n        img_white = torch.full((1, 3, crop_size, crop_size), 1.0, dtype=torch.float).to(\n            device\n        )\n\n        # invert the Affine transformation matrix\n        mat_rev_initial[0:2, :] = mat\n        mat_rev = np.linalg.inv(mat_rev_initial.astype(np.float32))\n        mat_rev = mat_rev[:2, :]\n        mat_rev = torch.tensor(mat_rev[None, ...]).to(device)\n\n        if use_mask:\n            source_img_norm = norm(source_img, use_gpu=use_gpu)\n            source_img_512 = F.interpolate(source_img_norm, size=(512, 512))\n            out = pasring_model(source_img_512)[0]\n            parsing = out.squeeze(0).argmax(0)\n\n            tgt_mask = encode_segmentation_rgb(parsing, device)\n\n            # If the mask is large\n            if tgt_mask.sum() >= 5000:\n\n                target_mask = ko_transform.resize(tgt_mask, (crop_size, crop_size))\n\n                target_image_parsing = postprocess(\n                    swaped_img,\n                    source_img[0],\n                    target_mask,\n                    smooth_mask,\n                    device=device,\n                )\n\n                target_image_parsing = target_image_parsing[None, ...]\n                swaped_img = swaped_img[None, ...]\n\n                target_image = ko_transform.warp_affine(\n                    target_image_parsing, mat_rev, orisize\n                )\n            else:\n                swaped_img = swaped_img[None, ...]\n                target_image = ko_transform.warp_affine(\n                    swaped_img,\n                    mat_rev,\n                    orisize,\n                )\n        else:\n            swaped_img = swaped_img[None, ...]\n            target_image = ko_transform.warp_affine(\n                swaped_img,\n                mat_rev,\n                orisize,\n            )\n\n        img_white = ko_transform.warp_affine(img_white, mat_rev, orisize)\n\n        img_white[img_white > 0.0784] = 1.0\n\n        if use_cam:\n            img_white = K.morphology.erosion(img_white, kernel_use_cam)\n        else:\n            img_white = K.utils.tensor_to_image(img_white) * 255\n            img_white = cv2.erode(img_white, kernel_use_image, iterations=1)\n            img_white = cv2.GaussianBlur(img_white, (41, 41), 0)\n            img_white = K.utils.image_to_tensor(img_white).to(device)\n            img_white /= 255.0\n\n        target_image = K.color.rgb_to_bgr(target_image)\n\n        img = img_white * target_image + (1 - img_white) * img\n\n    final_img = K.utils.tensor_to_image(img)\n    final_img = (final_img * 255).astype(np.uint8)\n\n    return final_img\n"
  },
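The key geometric step in `reverse2wholeimage` is inverting the 2x3 crop-alignment matrix so the swapped crop can be warped back onto the original frame: the matrix is padded to 3x3 with `[0, 0, 1]`, inverted, and the top two rows are kept. A standalone numpy sketch of just that step, with hypothetical matrix values:

```python
# Standalone sketch of the affine inversion used in reverse2wholeimage.
import numpy as np

mat = np.array([[0.5, 0.0, 40.0],        # hypothetical 2x3 alignment matrix
                [0.0, 0.5, 25.0]], dtype=np.float32)

mat_full = np.vstack([mat, [0.0, 0.0, 1.0]])  # pad to a full 3x3 transform
mat_rev = np.linalg.inv(mat_full)[:2, :]      # invert, keep the 2x3 warp back

print(mat_rev)
```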
  {
    "path": "src/dot/simswap/util/util.py",
    "content": "#!/usr/bin/env python3\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os\r\n\r\nimport cv2\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\nfrom PIL import Image\r\n\r\nfrom ..parsing_model.model import BiSeNet\r\n\r\n\r\n# Converts a Tensor into a Numpy array\r\n# |imtype|: the desired type of the converted numpy array\r\ndef tensor2im(image_tensor, imtype=np.uint8, normalize=True):\r\n    if isinstance(image_tensor, list):\r\n        image_numpy = []\r\n        for i in range(len(image_tensor)):\r\n            image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))\r\n        return image_numpy\r\n    image_numpy = image_tensor.cpu().float().numpy()\r\n    if normalize:\r\n        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0\r\n    else:\r\n        image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\r\n    image_numpy = np.clip(image_numpy, 0, 255)\r\n    if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:\r\n        image_numpy = image_numpy[:, :, 0]\r\n    return image_numpy.astype(imtype)\r\n\r\n\r\n# Converts a one-hot tensor into a colorful label map\r\ndef tensor2label(label_tensor, n_label, imtype=np.uint8):\r\n    if n_label == 0:\r\n        return tensor2im(label_tensor, imtype)\r\n    label_tensor = label_tensor.cpu().float()\r\n    if label_tensor.size()[0] > 1:\r\n        label_tensor = label_tensor.max(0, keepdim=True)[1]\r\n    label_tensor = Colorize(n_label)(label_tensor)\r\n    label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))\r\n    return label_numpy.astype(imtype)\r\n\r\n\r\ndef save_image(image_numpy, image_path):\r\n    image_pil = Image.fromarray(image_numpy)\r\n    image_pil.save(image_path)\r\n\r\n\r\ndef mkdirs(paths):\r\n    if isinstance(paths, list) and not isinstance(paths, str):\r\n        for path in paths:\r\n            mkdir(path)\r\n    else:\r\n        mkdir(paths)\r\n\r\n\r\ndef mkdir(path):\r\n    if not os.path.exists(path):\r\n        os.makedirs(path)\r\n\r\n\r\n###############################################################################\r\n# Code from\r\n# https://github.com/ycszen/pytorch-seg/blob/master/transform.py\r\n# Modified so it complies with the Citscape label map colors\r\n###############################################################################\r\ndef uint82bin(n, count=8):\r\n    \"\"\"returns the binary of integer n, count refers to amount of bits\"\"\"\r\n    return \"\".join([str((n >> y) & 1) for y in range(count - 1, -1, -1)])\r\n\r\n\r\ndef labelcolormap(N):\r\n    if N == 35:  # cityscape\r\n        cmap = np.array(\r\n            [\r\n                (0, 0, 0),\r\n                (0, 0, 0),\r\n                (0, 0, 0),\r\n                (0, 0, 0),\r\n                (0, 0, 0),\r\n                (111, 74, 0),\r\n                (81, 0, 81),\r\n                (128, 64, 128),\r\n                (244, 35, 232),\r\n                (250, 170, 160),\r\n                (230, 150, 140),\r\n                (70, 70, 70),\r\n                (102, 102, 156),\r\n                (190, 153, 153),\r\n                (180, 165, 180),\r\n                (150, 100, 100),\r\n                (150, 120, 90),\r\n                (153, 153, 153),\r\n                (153, 153, 153),\r\n                (250, 170, 30),\r\n                (220, 220, 0),\r\n                (107, 142, 35),\r\n                (152, 251, 152),\r\n                (70, 130, 180),\r\n                (220, 20, 60),\r\n                (255, 0, 0),\r\n     
           (0, 0, 142),\r\n                (0, 0, 70),\r\n                (0, 60, 100),\r\n                (0, 0, 90),\r\n                (0, 0, 110),\r\n                (0, 80, 100),\r\n                (0, 0, 230),\r\n                (119, 11, 32),\r\n                (0, 0, 142),\r\n            ],\r\n            dtype=np.uint8,\r\n        )\r\n    else:\r\n        cmap = np.zeros((N, 3), dtype=np.uint8)\r\n        for i in range(N):\r\n            r, g, b = 0, 0, 0\r\n            id = i\r\n            for j in range(7):\r\n                str_id = uint82bin(id)\r\n                r = r ^ (np.uint8(str_id[-1]) << (7 - j))\r\n                g = g ^ (np.uint8(str_id[-2]) << (7 - j))\r\n                b = b ^ (np.uint8(str_id[-3]) << (7 - j))\r\n                id = id >> 3\r\n            cmap[i, 0] = r\r\n            cmap[i, 1] = g\r\n            cmap[i, 2] = b\r\n    return cmap\r\n\r\n\r\ndef _totensor(array):\r\n    tensor = torch.from_numpy(array)\r\n    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()\r\n    return img.float().div(255)\r\n\r\n\r\ndef load_parsing_model(path, use_mask, use_gpu):\r\n    if use_mask:\r\n        n_classes = 19\r\n        net = BiSeNet(n_classes=n_classes)\r\n        if use_gpu:\r\n            net.to(\"mps\" if torch.backends.mps.is_available() else \"cuda\")\r\n            net.load_state_dict(torch.load(path))\r\n        else:\r\n            net.cpu()\r\n            net.load_state_dict(torch.load(path, map_location=torch.device(\"cpu\")))\r\n\r\n        net.eval()\r\n        return net\r\n    else:\r\n        return None\r\n\r\n\r\ndef crop_align(\r\n    detect_model, img_a_whole, crop_size, use_gpu, transformer_Arcface, swap_model\r\n):\r\n\r\n    face_detection = detect_model.get(img_a_whole, crop_size)\r\n    if face_detection is None:\r\n        return None\r\n\r\n    img_a_align_crop, _ = face_detection\r\n    img_a_align_crop_pil = Image.fromarray(\r\n        cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB)\r\n    )\r\n    img_a = transformer_Arcface(img_a_align_crop_pil)\r\n    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])\r\n\r\n    # move the aligned identity crop to the selected device\r\n    if use_gpu:\r\n        img_id = (\r\n            img_id.to(\"mps\")\r\n            if torch.backends.mps.is_available()\r\n            else img_id.to(\"cuda\")\r\n        )\r\n    else:\r\n        img_id = img_id.cpu()\r\n\r\n    # create latent id\r\n    img_id_downsample = F.interpolate(img_id, size=(112, 112))\r\n    id_vector = swap_model.netArc(img_id_downsample)\r\n    id_vector = id_vector.detach().to(\"cpu\")\r\n    id_vector = id_vector / np.linalg.norm(id_vector, axis=1, keepdims=True)\r\n\r\n    id_vector = (\r\n        id_vector.to(\"mps\" if torch.backends.mps.is_available() else \"cuda\")\r\n        if use_gpu\r\n        else id_vector.to(\"cpu\")\r\n    )\r\n\r\n    return id_vector\r\n\r\n\r\nclass Colorize(object):\r\n    def __init__(self, n=35):\r\n        self.cmap = labelcolormap(n)\r\n        self.cmap = torch.from_numpy(self.cmap[:n])\r\n\r\n    def __call__(self, gray_image):\r\n        size = gray_image.size()\r\n        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)\r\n\r\n        for label in range(0, len(self.cmap)):\r\n            mask = (label == gray_image[0]).cpu()\r\n            color_image[0][mask] = self.cmap[label][0]\r\n            color_image[1][mask] = self.cmap[label][1]\r\n            color_image[2][mask] = self.cmap[label][2]\r\n\r\n        return color_image\r\n"
  },
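`_totensor` above is the small helper the swap path relies on to move OpenCV images into the network's layout; a quick sketch of what it produces on a dummy input:

```python
# Minimal sketch of _totensor: HWC uint8 image -> CHW float32 tensor in [0, 1].
import numpy as np

from dot.simswap.util.util import _totensor

frame = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)  # dummy HWC frame
tensor = _totensor(frame)
print(tensor.shape, tensor.dtype, float(tensor.max()))  # (3, 224, 224), float32
```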
  {
    "path": "src/dot/ui/ui.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport os\nimport sys\nimport tkinter\nimport traceback\nfrom pathlib import Path\n\nimport click\nimport customtkinter\nimport yaml\n\nfrom dot.__main__ import run\n\ncustomtkinter.set_appearance_mode(\"Dark\")\ncustomtkinter.set_default_color_theme(\"blue\")\n\n\nclass ToolTip(object):\n    def __init__(self, widget):\n        self.widget = widget\n        self.tipwindow = None\n        self.id = None\n        self.x = self.y = 0\n\n    def showtip(self, text):\n        \"Display text in tooltip window\"\n        self.text = text\n        if self.tipwindow or not self.text:\n            return\n        x, y, cx, cy = self.widget.bbox(\"insert\")\n        x = x + self.widget.winfo_rootx() + 57\n        y = y + cy + self.widget.winfo_rooty() + 27\n        self.tipwindow = tw = tkinter.Toplevel(self.widget)\n        tw.wm_overrideredirect(1)\n        tw.wm_geometry(\"+%d+%d\" % (x, y))\n        label = tkinter.Label(\n            tw,\n            text=self.text,\n            justify=tkinter.LEFT,\n            background=\"#ffffff\",\n            relief=tkinter.SOLID,\n            borderwidth=1,\n            font=(\"arial\", \"10\", \"normal\"),\n        )\n        label.pack(ipadx=8, ipady=5)\n\n    def hidetip(self):\n        tw = self.tipwindow\n        self.tipwindow = None\n        if tw:\n            tw.destroy()\n\n\nclass ToplevelUsageWindow(customtkinter.CTkToplevel):\n    \"\"\"\n    The class of the usage window\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.title(\"Usage\")\n        self.geometry(f\"{700}x{470}\")\n        self.resizable(False, False)\n        self.attributes(\"-topmost\", True)\n\n        self.textbox = customtkinter.CTkTextbox(\n            master=self, width=700, height=550, corner_radius=0\n        )\n        self.textbox.grid(row=0, column=0, sticky=\"nsew\")\n        self.textbox.insert(\n            \"0.0\",\n            \"\"\"\n            source (str): The source image or video.\\n\n            target (Union[int, str]): The target image, video or camera id.\\n\n            config_file (str): Path to the configuration file for the deepfake.\\n\n            swap_type (str): The type of swap to run.\\n\n            gpen_type (str, optional): The type of gpen model to use. Defaults to None.\\n\n            gpen_path (str, optional): The path to the gpen models. Defaults to \"saved_models/gpen\".\\n\n            show_fps (bool, optional): Pass flag to show fps value. Defaults to False.\\n\n            use_gpu (bool, optional): Pass flag to use GPU else use CPU. Defaults to False.\\n\n            head_pose (bool): Estimates head pose before swap. Used by fomm.\\n\n            model_path (str, optional): The path to the model's weights. Defaults to None.\\n\n            parsing_model_path (str, optional): The path to the parsing model. Defaults to None.\\n\n            arcface_model_path (str, optional): The path to the arcface model. Defaults to None.\\n\n            checkpoints_dir (str, optional): The path to the checkpoints directory. Defaults to None.\\n\n            crop_size (int, optional): The size to crop the images to. 
Defaults to 224.\\n\n            \"\"\",\n        )\n        self.textbox.configure(state=tkinter.DISABLED)\n\n\nclass ToplevelAboutWindow(customtkinter.CTkToplevel):\n    \"\"\"\n    The class of the about window\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        self.title(\"About DOT\")\n        self.geometry(f\"{700}x{300}\")\n        self.resizable(False, False)\n        self.attributes(\"-topmost\", True)\n\n        self.textbox = customtkinter.CTkTextbox(\n            master=self, width=700, height=300, corner_radius=0\n        )\n        self.textbox.grid(row=0, column=0, sticky=\"nsew\")\n\n        self.textbox.insert(\n            \"0.0\",\n            \"\"\"\n            DOT (aka Deepfake Offensive Toolkit) makes real-time, controllable deepfakes ready for virtual \\n\n            cameras injection. DOT is created for performing penetration testing against e.g. identity \\n\n            verification and video conferencing systems, for the use by security analysts, \\n\n            Red Team members, and biometrics researchers. DOT is developed for research and demonstration purposes. \\n\n            As an end user, you have the responsibility to obey all applicable laws when using this program. \\n\n            Authors and contributing developers assume no liability and are not responsible for any misuse \\n\n            or damage caused by the use of this program.\n            \"\"\",\n        )\n        self.textbox.configure(state=tkinter.DISABLED)\n\n\nclass TabView:\n    \"\"\"\n    A class to handle the layout and functionality for each tab.\n    \"\"\"\n\n    def __init__(self, tab_view, target_tip_text, use_image=False, use_video=False):\n        self.tab_view = tab_view\n        self.target_tip_text = target_tip_text\n        self.use_image = use_image\n        self.use_video = use_video\n        self.save_folder = None\n\n        self.resources_path = \"\"\n\n        # MacOS bundle has different resource directory structure\n        if sys.platform == \"darwin\":\n            if getattr(sys, \"frozen\", False):\n                self.resources_path = os.path.join(\n                    str(Path(sys.executable).resolve().parents[0]).replace(\"MacOS\", \"\"),\n                    \"Resources\",\n                )\n\n        self.setup_ui()\n\n    def setup_ui(self):\n        # create entry text for source, target and config\n        self.source_target_config_frame = customtkinter.CTkFrame(self.tab_view)\n        self.source_target_config_frame.grid(\n            row=0, column=0, padx=(20, 20), pady=(20, 0), sticky=\"nsew\"\n        )\n\n        self.source_label = customtkinter.CTkLabel(\n            master=self.source_target_config_frame, text=\"source\"\n        )\n\n        self.source = customtkinter.CTkEntry(\n            master=self.source_target_config_frame,\n            placeholder_text=\"source\",\n            width=85,\n        )\n        self.source_button = customtkinter.CTkButton(\n            master=self.source_target_config_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.source),\n            width=10,\n        )\n\n        self.target = customtkinter.CTkEntry(\n            master=self.source_target_config_frame, placeholder_text=\"target\", width=85\n        )\n        self.target_label = customtkinter.CTkLabel(\n            master=self.source_target_config_frame, text=\"target\"\n        
)\n        if (self.use_image) or (self.use_video):\n            self.target_button = customtkinter.CTkButton(\n                master=self.source_target_config_frame,\n                fg_color=\"gray\",\n                text_color=\"white\",\n                text=\"Open\",\n                command=lambda: self.upload_file_action(self.target),\n                width=10,\n            )\n\n            self.save_folder = customtkinter.CTkEntry(\n                master=self.source_target_config_frame,\n                placeholder_text=\"save_folder\",\n                width=85,\n            )\n            self.save_folder_label = customtkinter.CTkLabel(\n                master=self.source_target_config_frame, text=\"save_folder\"\n            )\n            self.save_folder_button = customtkinter.CTkButton(\n                master=self.source_target_config_frame,\n                fg_color=\"gray\",\n                text_color=\"white\",\n                text=\"Open\",\n                command=lambda: self.upload_folder_action(self.save_folder),\n                width=10,\n            )\n\n        self.config_file_var = customtkinter.StringVar(\n            value=\"Select\"\n        )  # set initial value\n\n        self.config_file_combobox = customtkinter.CTkOptionMenu(\n            master=self.source_target_config_frame,\n            values=[\"fomm\", \"faceswap_cv2\", \"simswap\", \"simswaphq\"],\n            command=self.optionmenu_callback,\n            variable=self.config_file_var,\n            width=85,\n            button_color=\"#3C3C3C\",\n            fg_color=\"#343638\",\n            dynamic_resizing=False,\n        )\n\n        self.config_file = customtkinter.CTkEntry(\n            master=self.source_target_config_frame, placeholder_text=\"config\", width=85\n        )\n        self.config_file_label = customtkinter.CTkLabel(\n            master=self.source_target_config_frame, text=\"config_file\"\n        )\n        self.config_file_button = customtkinter.CTkButton(\n            master=self.source_target_config_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_action_config_file(\n                self.config_file_combobox, self.config_file_var\n            ),\n            width=10,\n        )\n\n        self.source_label.grid(row=1, column=0, pady=(32, 10), padx=30, sticky=\"w\")\n        self.source.grid(row=1, column=0, pady=(32, 10), padx=(80, 20), sticky=\"w\")\n        self.source_button.grid(\n            row=1,\n            column=0,\n            pady=(32, 10),\n            padx=(175, 20),\n            sticky=\"w\",\n        )\n        self.CreateToolTip(\n            self.source,\n            text=\"The path of the source directory that contains a set of images\\n\"\n            \"or the path of one image intended for utilization in the deepfake generation process\",\n        )\n\n        self.target.grid(row=2, column=0, pady=10, padx=(80, 20), sticky=\"w\")\n        if (self.use_image) or (self.use_video):\n            self.target_button.grid(\n                row=2,\n                column=0,\n                pady=(10, 10),\n                padx=(175, 20),\n                sticky=\"w\",\n            )\n        self.target_label.grid(row=2, column=0, pady=10, padx=(35, 20), sticky=\"w\")\n        if (not self.use_image) and (not self.use_video):\n            self.target.insert(0, 0)\n\n        self.CreateToolTip(self.target, text=self.target_tip_text)\n\n        if 
(self.use_image) or (self.use_video):\n            self.save_folder.grid(row=3, column=0, pady=10, padx=(80, 20), sticky=\"w\")\n\n            self.save_folder_button.grid(\n                row=3,\n                column=0,\n                pady=(10, 10),\n                padx=(175, 20),\n                sticky=\"w\",\n            )\n            self.save_folder_label.grid(row=3, column=0, pady=10, padx=5, sticky=\"w\")\n\n            self.CreateToolTip(self.save_folder, text=\"The path to the save folder\")\n\n        self.config_file_combobox.grid(\n            row=4, column=0, pady=10, padx=(80, 20), sticky=\"w\"\n        )\n        self.config_file_label.grid(row=4, column=0, pady=10, padx=10, sticky=\"w\")\n\n        self.config_file_button.grid(\n            row=4,\n            column=0,\n            pady=10,\n            padx=(175, 20),\n            sticky=\"w\",\n        )\n        self.CreateToolTip(\n            self.config_file_combobox, text=\"Configuration file for the deepfake\"\n        )\n\n        # create entry text for dot options\n        self.option_entry_frame = customtkinter.CTkFrame(self.tab_view)\n        self.option_entry_frame.grid(\n            row=1, column=0, columnspan=4, padx=(20, 20), pady=(20, 0), sticky=\"nsew\"\n        )\n\n        self.advanced_options = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"Advanced\"\n        )\n\n        self.model_path_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"model_path\"\n        )\n        self.model_path = customtkinter.CTkEntry(\n            master=self.option_entry_frame, placeholder_text=\"model_path\", width=85\n        )\n\n        self.parsing_model_path_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"parsing_model\"\n        )\n        self.parsing_model_path = customtkinter.CTkEntry(\n            master=self.option_entry_frame,\n            placeholder_text=\"parsing_model_path\",\n            width=85,\n        )\n\n        self.arcface_model_path_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"arcface_model\"\n        )\n        self.arcface_model_path = customtkinter.CTkEntry(\n            master=self.option_entry_frame,\n            placeholder_text=\"arcface_model_path\",\n            width=85,\n        )\n\n        self.checkpoints_dir_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"checkpoints_dir\"\n        )\n        self.checkpoints_dir = customtkinter.CTkEntry(\n            master=self.option_entry_frame, placeholder_text=\"checkpoints_dir\", width=85\n        )\n\n        self.gpen_path_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"gpen_path\"\n        )\n        self.gpen_path = customtkinter.CTkEntry(\n            master=self.option_entry_frame, placeholder_text=\"gpen_path\", width=85\n        )\n\n        self.crop_size_label = customtkinter.CTkLabel(\n            master=self.option_entry_frame, text=\"crop_size\"\n        )\n        self.crop_size = customtkinter.CTkEntry(\n            master=self.option_entry_frame, placeholder_text=\"crop_size\"\n        )\n\n        self.model_path_button = customtkinter.CTkButton(\n            master=self.option_entry_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.model_path),\n            width=10,\n        )\n        
self.parsing_model_path_button = customtkinter.CTkButton(\n            master=self.option_entry_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.parsing_model_path),\n            width=10,\n        )\n        self.arcface_model_path_button = customtkinter.CTkButton(\n            master=self.option_entry_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.arcface_model_path),\n            width=10,\n        )\n        self.checkpoints_dir_button = customtkinter.CTkButton(\n            master=self.option_entry_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.checkpoints_dir),\n            width=10,\n        )\n        self.gpen_path_button = customtkinter.CTkButton(\n            master=self.option_entry_frame,\n            fg_color=\"gray\",\n            text_color=\"white\",\n            text=\"Open\",\n            command=lambda: self.upload_file_action(self.gpen_path),\n            width=10,\n        )\n\n        self.advanced_options.grid(row=0, column=0, pady=10, padx=(20, 20), sticky=\"w\")\n\n        self.model_path_label.grid(row=1, column=2, pady=10, padx=(40, 20), sticky=\"w\")\n        self.model_path.grid(row=1, column=2, pady=10, padx=(115, 20), sticky=\"w\")\n        self.model_path_button.grid(\n            row=1,\n            column=2,\n            pady=10,\n            padx=(210, 20),\n            sticky=\"w\",\n        )\n\n        self.parsing_model_path_label.grid(\n            row=2, column=2, pady=10, padx=(23, 20), sticky=\"w\"\n        )\n        self.parsing_model_path.grid(\n            row=2, column=2, pady=10, padx=(115, 20), sticky=\"w\"\n        )\n        self.parsing_model_path_button.grid(\n            row=2,\n            column=2,\n            pady=10,\n            padx=(210, 20),\n            sticky=\"w\",\n        )\n\n        self.arcface_model_path_label.grid(\n            row=3, column=2, pady=10, padx=(21, 20), sticky=\"w\"\n        )\n        self.arcface_model_path.grid(\n            row=3, column=2, pady=10, padx=(115, 20), sticky=\"w\"\n        )\n        self.arcface_model_path_button.grid(\n            row=3,\n            column=2,\n            pady=10,\n            padx=(210, 20),\n            sticky=\"w\",\n        )\n\n        self.checkpoints_dir_label.grid(\n            row=1, column=3, pady=10, padx=(16, 20), sticky=\"w\"\n        )\n        self.checkpoints_dir.grid(row=1, column=3, pady=10, padx=(115, 20), sticky=\"w\")\n        self.checkpoints_dir_button.grid(\n            row=1,\n            column=3,\n            pady=10,\n            padx=(210, 20),\n            sticky=\"w\",\n        )\n\n        self.gpen_path_label.grid(row=2, column=3, pady=10, padx=(48, 20), sticky=\"w\")\n        self.gpen_path.grid(row=2, column=3, pady=10, padx=(115, 20), sticky=\"w\")\n        self.gpen_path_button.grid(\n            row=2,\n            column=3,\n            pady=10,\n            padx=(210, 20),\n            sticky=\"w\",\n        )\n\n        self.crop_size_label.grid(row=3, column=3, pady=10, padx=(50, 20), sticky=\"w\")\n        self.crop_size.grid(row=3, column=3, pady=10, padx=(115, 20), sticky=\"w\")\n\n        self.CreateToolTip(self.crop_size, text=\"The size of the image crop\")\n        
self.CreateToolTip(self.gpen_path, text=\"The path to the gpen models\")\n        self.CreateToolTip(\n            self.checkpoints_dir,\n            text=\"The path to the checkpoints directory. Used by SimSwap\",\n        )\n        self.CreateToolTip(\n            self.arcface_model_path,\n            text=\"The path to the arcface model. Used by SimSwap\",\n        )\n        self.CreateToolTip(\n            self.parsing_model_path,\n            text=\"The path to the parsing model. Used by SimSwap\",\n        )\n        self.CreateToolTip(\n            self.model_path, text=\"The path to the model's weights. Used by fomm\"\n        )\n\n        # create radiobutton frame for swap_type\n        self.swap_type_frame = customtkinter.CTkFrame(self.tab_view)\n        self.swap_type_frame.grid(\n            row=0, column=1, padx=(20, 20), pady=(20, 0), sticky=\"nsew\"\n        )\n        self.swap_type_radio_var = tkinter.StringVar(value=None)\n        self.swap_type_label_radio_group = customtkinter.CTkLabel(\n            master=self.swap_type_frame, text=\"swap_type\"\n        )\n        self.swap_type_label_radio_group.grid(\n            row=0, column=2, columnspan=1, padx=10, pady=10, sticky=\"\"\n        )\n        self.fomm_radio_button = customtkinter.CTkRadioButton(\n            master=self.swap_type_frame,\n            variable=self.swap_type_radio_var,\n            value=\"fomm\",\n            text=\"fomm\",\n        )\n        self.fomm_radio_button.grid(row=1, column=2, pady=10, padx=20, sticky=\"w\")\n        self.CreateToolTip(self.fomm_radio_button, text=\"Use the deepfake from fomm\")\n\n        self.faceswap_cv2_radio_button = customtkinter.CTkRadioButton(\n            master=self.swap_type_frame,\n            variable=self.swap_type_radio_var,\n            value=\"faceswap_cv2\",\n            text=\"faceswap_cv2\",\n        )\n        self.faceswap_cv2_radio_button.grid(\n            row=2, column=2, pady=10, padx=20, sticky=\"w\"\n        )\n        self.CreateToolTip(\n            self.faceswap_cv2_radio_button, text=\"Use the deepfake from faceswap cv2\"\n        )\n\n        self.simswap_radio_button = customtkinter.CTkRadioButton(\n            master=self.swap_type_frame,\n            variable=self.swap_type_radio_var,\n            value=\"simswap\",\n            text=\"simswap\",\n        )\n        self.simswap_radio_button.grid(row=3, column=2, pady=10, padx=20, sticky=\"w\")\n        self.CreateToolTip(\n            self.simswap_radio_button, text=\"Use the deepfake from SimSwap\"\n        )\n\n        # create radiobutton frame for gpen_type\n        self.gpen_type_frame = customtkinter.CTkFrame(self.tab_view)\n        self.gpen_type_frame.grid(\n            row=0, column=2, padx=(20, 20), pady=(20, 0), sticky=\"nsew\"\n        )\n        self.gpen_type_radio_var = tkinter.StringVar(value=\"\")\n        self.gpen_type_label_radio_group = customtkinter.CTkLabel(\n            master=self.gpen_type_frame, text=\"gpen_type\"\n        )\n        self.gpen_type_label_radio_group.grid(\n            row=0, column=2, columnspan=1, padx=10, pady=10, sticky=\"\"\n        )\n        self.gpen_type_radio_button_1 = customtkinter.CTkRadioButton(\n            master=self.gpen_type_frame,\n            variable=self.gpen_type_radio_var,\n            value=\"gpen_256\",\n            text=\"gpen_256\",\n        )\n\n        self.gpen_type_radio_button_1.grid(\n            row=1, column=2, pady=10, padx=20, sticky=\"w\"\n        )\n        self.CreateToolTip(\n            
self.gpen_type_radio_button_1, text=\"Apply face restoration with GPEN 256\"\n        )\n\n        self.gpen_type_radio_button_2 = customtkinter.CTkRadioButton(\n            master=self.gpen_type_frame,\n            variable=self.gpen_type_radio_var,\n            value=\"gpen_512\",\n            text=\"gpen_512\",\n        )\n        self.gpen_type_radio_button_2.grid(\n            row=2, column=2, pady=10, padx=20, sticky=\"w\"\n        )\n        self.CreateToolTip(\n            self.gpen_type_radio_button_2, text=\"Apply face restoration with GPEN 512\"\n        )\n\n        # create checkbox and switch frame\n        self.checkbox_slider_frame = customtkinter.CTkFrame(self.tab_view)\n        self.checkbox_slider_frame.grid(\n            row=0, column=3, padx=(20, 20), pady=(20, 0), sticky=\"nsew\"\n        )\n\n        self.show_fps_checkbox_var = tkinter.IntVar()\n        self.show_fps_checkbox = customtkinter.CTkCheckBox(\n            master=self.checkbox_slider_frame,\n            text=\"show_fps\",\n            variable=self.show_fps_checkbox_var,\n        )\n\n        self.use_gpu_checkbox_var = tkinter.IntVar()\n        self.use_gpu_checkbox = customtkinter.CTkCheckBox(\n            master=self.checkbox_slider_frame,\n            text=\"use_gpu\",\n            variable=self.use_gpu_checkbox_var,\n        )\n\n        self.head_pose_checkbox_var = tkinter.IntVar()\n        self.head_pose_checkbox = customtkinter.CTkCheckBox(\n            master=self.checkbox_slider_frame,\n            text=\"head_pose\",\n            variable=self.head_pose_checkbox_var,\n        )\n\n        self.show_fps_checkbox.grid(row=1, column=3, pady=(39, 0), padx=20, sticky=\"w\")\n        self.use_gpu_checkbox.grid(row=2, column=3, pady=(20, 0), padx=20, sticky=\"w\")\n        self.head_pose_checkbox.grid(row=5, column=3, pady=(20, 0), padx=20, sticky=\"w\")\n        self.CreateToolTip(self.show_fps_checkbox, text=\"Show the fps value\")\n        self.CreateToolTip(\n            self.use_gpu_checkbox,\n            text=\"If checked, the deepfake will use the GPU.\\n\"\n            \"If it's not checked, the deepfake will use the CPU\",\n        )\n        self.CreateToolTip(\n            self.head_pose_checkbox, text=\"Estimate head pose before swap. 
Used by fomm\"\n        )\n\n        # create run button\n        self.error_label = customtkinter.CTkLabel(\n            master=self.tab_view, text_color=\"red\", text=\"\"\n        )\n        self.error_label.grid(\n            row=4, column=0, columnspan=4, padx=(20, 20), pady=(0, 20), sticky=\"nsew\"\n        )\n\n        self.run_button = customtkinter.CTkButton(\n            master=self.tab_view,\n            fg_color=\"white\",\n            border_width=2,\n            text_color=\"black\",\n            text=\"RUN\",\n            height=40,\n            command=lambda: self.start_button_event(self.error_label),\n        )\n        self.run_button.grid(\n            row=2, column=1, columnspan=2, padx=(50, 150), pady=(20, 0), sticky=\"nsew\"\n        )\n        self.CreateToolTip(self.run_button, text=\"Start running the deepfake\")\n\n        self.run_label = customtkinter.CTkLabel(\n            master=self.tab_view,\n            text=\"The initial execution of dot may require a few minutes to complete.\",\n            text_color=\"gray\",\n        )\n        self.run_label.grid(\n            row=3, column=0, columnspan=3, padx=(180, 0), pady=(0, 20), sticky=\"nsew\"\n        )\n\n    def modify_entry(self, entry_element: customtkinter.CTkEntry, text: str):\n        \"\"\"\n        Modify the value of the CTkEntry\n\n        Args:\n            entry_element (customtkinter.CTkOptionMenu): The CTkEntry element.\n            text (str): The new text that will be inserted into the CTkEntry\n        \"\"\"\n\n        entry_element.delete(0, tkinter.END)\n        entry_element.insert(0, text)\n\n    def upload_action_config_file(\n        self,\n        element: customtkinter.CTkOptionMenu,\n        config_file_var: customtkinter.StringVar,\n    ):\n        \"\"\"\n        Set the configurations for the swap_type using the upload button\n\n        Args:\n            element (customtkinter.CTkOptionMenu): The OptionMenu element.\n            config_file_var (customtkinter.StringVar): OptionMenu variable.\n        \"\"\"\n\n        entry_list = [\n            \"source\",\n            \"target\",\n            \"model_path\",\n            \"parsing_model_path\",\n            \"arcface_model_path\",\n            \"checkpoints_dir\",\n            \"gpen_path\",\n            \"crop_size\",\n        ]\n        radio_list = [\"swap_type\"]\n\n        filename = tkinter.filedialog.askopenfilename()\n\n        config = {}\n        if len(filename) > 0:\n            with open(filename) as f:\n                config = yaml.safe_load(f)\n\n        if config[\"swap_type\"] == \"simswap\":\n            if config.get(\"swap_type\", \"0\") == \"512\":\n                config_file_var = \"simswaphq\"\n            else:\n                config_file_var = \"simswap\"\n        else:\n            config_file_var = config[\"swap_type\"]\n\n        element.set(config_file_var)\n\n        for key, value in config.items():\n            if key in entry_list:\n                self.modify_entry(getattr(self, key), value)\n            elif key in radio_list:\n                self.swap_type_radio_var = tkinter.StringVar(value=value)\n                radio_button = getattr(self, f\"{value}_radio_button\")\n                radio_button.invoke()\n\n        for entry in entry_list:\n            if (entry not in [\"source\", \"target\"]) and (entry not in config):\n                self.modify_entry(getattr(self, entry), \"\")\n\n    def CreateToolTip(self, widget, text):\n        toolTip = ToolTip(widget)\n\n        def 
enter(event):\n            toolTip.showtip(text)\n\n        def leave(event):\n            toolTip.hidetip()\n\n        widget.bind(\"<Enter>\", enter)\n        widget.bind(\"<Leave>\", leave)\n\n    def start_button_event(self, error_label):\n        \"\"\"\n        Start running the deepfake\n        \"\"\"\n        try:\n            error_label.configure(text=\"\")\n\n            # load config, if provided\n            config = {}\n            if len(self.config_file.get()) > 0:\n                with open(self.config_file.get()) as f:\n                    config = yaml.safe_load(f)\n\n            # run dot\n            run(\n                swap_type=config.get(\n                    \"swap_type\", self.swap_type_radio_var.get() or None\n                ),\n                source=config.get(\"source\", self.source.get() or None),\n                target=config.get(\"target\", self.target.get() or None),\n                model_path=config.get(\"model_path\", self.model_path.get() or None),\n                parsing_model_path=config.get(\n                    \"parsing_model_path\", self.parsing_model_path.get() or None\n                ),\n                arcface_model_path=config.get(\n                    \"arcface_model_path\", self.arcface_model_path.get() or None\n                ),\n                checkpoints_dir=config.get(\n                    \"checkpoints_dir\", self.checkpoints_dir.get() or None\n                ),\n                gpen_type=config.get(\"gpen_type\", self.gpen_type_radio_var.get()),\n                gpen_path=config.get(\n                    \"gpen_path\", self.gpen_path.get() or \"saved_models/gpen\"\n                ),\n                crop_size=config.get(\n                    \"crop_size\",\n                    (\n                        int(self.crop_size.get())\n                        if len(self.crop_size.get()) > 0\n                        else None\n                    )\n                    or 224,\n                ),\n                head_pose=config.get(\"head_pose\", int(self.head_pose_checkbox.get())),\n                save_folder=self.save_folder.get()\n                if self.save_folder is not None\n                else None,\n                show_fps=config.get(\"show_fps\", int(self.show_fps_checkbox.get())),\n                use_gpu=config.get(\"use_gpu\", int(self.use_gpu_checkbox.get())),\n                use_video=self.use_video,\n                use_image=self.use_image,\n                limit=None,\n            )\n        except Exception as e:\n            print(e)\n            print(traceback.format_exc())\n            error_label.configure(text=str(e))\n\n    def upload_folder_action(self, entry_element: customtkinter.CTkEntry):\n        \"\"\"\n        Action for the upload folder buttons to update the value of a CTkEntry\n\n        Args:\n            entry_element (customtkinter.CTkEntry): The CTkEntry element.\n        \"\"\"\n\n        foldername = tkinter.filedialog.askdirectory()\n        self.modify_entry(entry_element, foldername)\n\n    def upload_file_action(self, entry_element: customtkinter.CTkEntry):\n        \"\"\"\n        Action for the upload file buttons to update the value of a CTkEntry\n\n        Args:\n            entry_element (customtkinter.CTkEntry): The CTkEntry element.\n        \"\"\"\n\n        filename = tkinter.filedialog.askopenfilename()\n        self.modify_entry(entry_element, filename)\n\n    def optionmenu_callback(self, choice: str):\n        \"\"\"\n        Set the configurations 
for the swap_type using the optionmenu\n\n        Args:\n            choice (str): The type of swap to run.\n        \"\"\"\n\n        entry_list = [\"source\", \"target\", \"crop_size\"]\n        radio_list = [\"swap_type\", \"gpen_type\"]\n        model_list = [\n            \"model_path\",\n            \"parsing_model_path\",\n            \"arcface_model_path\",\n            \"checkpoints_dir\",\n            \"gpen_path\",\n        ]\n\n        config_file = os.path.join(self.resources_path, f\"configs/{choice}.yaml\")\n\n        if os.path.isfile(config_file):\n            config = {}\n            with open(config_file) as f:\n                config = yaml.safe_load(f)\n\n            for key in config.keys():\n                if key in entry_list:\n                    self.modify_entry(getattr(self, key), config[key])\n                elif key in radio_list:\n                    if key == \"swap_type\":\n                        self.swap_type_radio_var = tkinter.StringVar(value=config[key])\n                    elif key == \"gpen_type\":\n                        self.gpen_type_radio_var = tkinter.StringVar(value=config[key])\n                    getattr(self, f\"{config[key]}_radio_button\").invoke()\n                elif key in model_list:\n                    self.modify_entry(\n                        getattr(self, key),\n                        os.path.join(self.resources_path, config[key]),\n                    )\n\n            for entry in entry_list:\n                if entry not in [\"source\", \"target\"]:\n                    if entry not in config.keys():\n                        self.modify_entry(getattr(self, entry), \"\")\n\n\nclass App(customtkinter.CTk):\n    \"\"\"\n    The main class of the dot UI\n    \"\"\"\n\n    def __init__(self):\n        super().__init__()\n\n        # configure window\n        self.title(\"Deepfake Offensive Toolkit\")\n        self.geometry(\"835x600\")\n        self.resizable(False, False)\n\n        self.grid_columnconfigure((0, 1), weight=1)\n        self.grid_rowconfigure((0, 1, 2, 3), weight=1)\n\n        # create menubar\n        menubar = tkinter.Menu(self)\n\n        filemenu = tkinter.Menu(menubar, tearoff=0)\n        filemenu.add_command(label=\"Exit\", command=self.quit)\n        menubar.add_cascade(label=\"File\", menu=filemenu)\n\n        helpmenu = tkinter.Menu(menubar, tearoff=0)\n        helpmenu.add_command(label=\"Usage\", command=self.usage_window)\n        helpmenu.add_separator()\n        helpmenu.add_command(label=\"About DOT\", command=self.about_window)\n        menubar.add_cascade(label=\"Help\", menu=helpmenu)\n\n        self.config(menu=menubar)\n\n        self.toplevel_usage_window = None\n        self.toplevel_about_window = None\n\n        tabview = customtkinter.CTkTabview(self)\n        tabview.pack(padx=0, pady=0)\n        live_tab = tabview.add(\"Live\")\n        image_tab = tabview.add(\"Image\")\n        video_tab = tabview.add(\"Video\")\n\n        self.live_tab_view = TabView(\n            live_tab, target_tip_text=\"The camera id. 
Usually 0 is the correct id\"\n        )\n        self.image_tab_view = TabView(\n            image_tab,\n            target_tip_text=\"Target images folder or a single image file\",\n            use_image=True,\n        )\n        self.video_tab_view = TabView(\n            video_tab,\n            target_tip_text=\"Target videos folder or a single video file\",\n            use_video=True,\n        )\n\n    def usage_window(self):\n        \"\"\"\n        Open the usage window\n        \"\"\"\n\n        if (\n            self.toplevel_usage_window is None\n            or not self.toplevel_usage_window.winfo_exists()\n        ):\n            self.toplevel_usage_window = ToplevelUsageWindow(\n                self\n            )  # create window if it's None or destroyed\n        self.toplevel_usage_window.focus()\n\n    def about_window(self):\n        \"\"\"\n        Open the about window\n        \"\"\"\n\n        if (\n            self.toplevel_about_window is None\n            or not self.toplevel_about_window.winfo_exists()\n        ):\n            self.toplevel_about_window = ToplevelAboutWindow(\n                self\n            )  # create window if it's None or destroyed\n        self.toplevel_about_window.focus()\n\n\n@click.command()\ndef main():\n    \"\"\"Run the dot UI.\"\"\"\n\n    app = App()\n    app.mainloop()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "tests/pipeline_test.py",
    "content": "#!/usr/bin/env python3\n\"\"\"\nCopyright (c) 2022, Sensity B.V. All rights reserved.\nlicensed under the BSD 3-Clause \"New\" or \"Revised\" License.\n\"\"\"\n\nimport unittest\nfrom unittest import mock\n\nfrom dot import DOT\n\n\ndef fake_generate(self, option, source, target, show_fps=False, **kwargs):\n    return [[None], [None]]\n\n\n@mock.patch.object(DOT, \"generate\", fake_generate)\nclass TestDotOptions(unittest.TestCase):\n    def setUp(self):\n        self._dot = DOT(use_image=True, save_folder=\"./tests\")\n\n        self.faceswap_cv2_option = self._dot.faceswap_cv2(False, False, None)\n\n        self.fomm_option = self._dot.fomm(False, False, None)\n\n        self.simswap_option = self._dot.simswap(False, False, None)\n\n    def test_option_creation(self):\n\n        success, rejected = self._dot.generate(\n            self.faceswap_cv2_option,\n            \"./tests\",\n            \"./tests\",\n            show_fps=False,\n            model_path=None,\n            limit=5,\n        )\n        assert len(success) == 1\n        assert len(rejected) == 1\n\n        success, rejected = self._dot.generate(\n            self.fomm_option,\n            \"./tests\",\n            \"./tests\",\n            show_fps=False,\n            model_path=None,\n            limit=5,\n        )\n        assert len(success) == 1\n        assert len(rejected) == 1\n\n        success, rejected = self._dot.generate(\n            self.simswap_option,\n            \"./tests\",\n            \"./tests\",\n            show_fps=False,\n            parsing_model_path=None,\n            arcface_model_path=None,\n            checkpoints_dir=None,\n            limit=5,\n        )\n        assert len(success) == 1\n        assert len(rejected) == 1\n"
  }
]