[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\ncover/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\n.pybuilder/\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n#   For a library or package, you might want to ignore these files since the code is\n#   intended to run in multiple environments; otherwise, check them in:\n# .python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n.idea\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n\n# pytype static type analyzer\n.pytype/\n\n# Cython debug symbols\ncython_debug/"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# BMW-Anonymization-Api\n\nData privacy and individuals’ anonymity are and always have been a major concern for data-driven companies. \n\nTherefore, we designed and implemented an anonymization API that localizes and obfuscates (i.e. hides) sensitive information in images/videos in order to preserve the individuals' anonymity. The main features of our anonymization tool are the following:\n* **Agnostic in terms of localization techniques**: our API currently supports [Semantic segmentation](https://github.com/BMW-InnovationLab/BMW-Semantic-Segmentation-Inference-API-GPU-CPU) or [Object Detection](https://github.com/BMW-InnovationLab/BMW-TensorFlow-Inference-API-GPU).\n* **Modular in terms of sensitive information**: the user can train a Deep Learning (DL) model for [object detection](https://github.com/BMW-InnovationLab/BMW-TensorFlow-Training-GUI) and [semantic segmentation](https://github.com/BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI) to localize the sensitive information she/he wishes to protect, e.g., individual's face or body, personal belongings, vehicles...\n* **Scalable in terms of anonymization techniques**: our API currently supports pixelating, blurring, blackening (masking). Also, additional anonymization techniques can be configured as stated below. 
For the highest level of privacy, we recommend using the blackening technique with degree 1.\n* **Supports DL-based models optimized via the [Intel&reg; OpenVINO&trade; toolkit v2021.1](https://docs.openvinotoolkit.org/latest/index.html) for CPU usage**: DL-based models optimized and deployed via the [Openvino Segmentation Inference API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Segmentation-Inference-API) and the [Openvino Detection Inference API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Inference-API) can also be used.\n* **Compatible with the BMW Deep Learning tools**: DL models trained via our [training](https://github.com/BMW-InnovationLab/BMW-TensorFlow-Training-GUI) and deployed via our [inference](https://github.com/BMW-InnovationLab/BMW-TensorFlow-Inference-API-GPU) APIs are compatible with this anonymization API. \n\n<p align=\"center\">\n  <img src=\"references/output_7.gif\" alt=\"animated\" />\n</p>\n\n\n## General Architecture & Deployment Mode:\n\nOur anonymization API receives an image along with a JSON object through which the user specifies mainly: \n* The sensitive information she/he wishes to obfuscate.\n* The anonymization technique.\n* The anonymization degree.\n* The localization technique. \n\n![](references/architecture_2.png) \n\nYou can deploy the anonymization API either:\n* As a standalone docker container which can be connected to other inference APIs ([object detection](https://github.com/BMW-InnovationLab/BMW-YOLOv4-Inference-API-CPU) or [semantic segmentation](https://github.com/BMW-InnovationLab/BMW-Semantic-Segmentation-Inference-API-GPU-CPU)) deployed within a standalone docker container as well.\n* As a network of docker containers along with other inference APIs running on the same machine via docker-compose. 
(please check the [following link](./docker_compose_readme.md) for the docker-compose deployment).\n\n\n## Prerequisites:\n- docker\n- docker-compose\n\n### Check for prerequisites\n\n#### To check if docker-ce is installed:\n```sh\ndocker --version\n```\n\n#### To check if docker-compose is installed:\n```sh\ndocker-compose --version\n```\n\n### Install prerequisites\n\n#### Ubuntu\n\nTo install [Docker](https://docs.docker.com/engine/install/ubuntu/) and [Docker Compose](https://docs.docker.com/compose/install/) on Ubuntu, please follow the link.\n\n#### Windows 10\n\nTo [install Docker on Windows](https://docs.docker.com/docker-for-windows/install/), please follow the link.\n\n**P.S: For Windows users, open the Docker Desktop menu by clicking the Docker Icon in the Notifications area. Select Settings, and then Advanced tab to adjust the resources available to Docker Engine.**\n\n## Build The Docker Image\n\nAs mentioned before, this container can be deployed using either **docker** or **docker-compose**.\n\n* If you wish to deploy this API using **docker-compose**, please refer to [following link](./docker_compose_readme.md). 
After deploying the API with docker compose, please consider returning to this documentation for further information about the API Endpoints and use configuration file sample sections.\n\n* If you wish to deploy this API using **docker**, please continue with the following docker build and run commands.\n\nIn order to build the project run the following command from the project's root directory:\n\n```sh\n docker build -t anonymization_api -f docker/dockerfile .\n```\n#### Build behind a proxy\nIn order to build the image behind a proxy use the following command in the project's root directory:\n```sh\ndocker build --build-arg http_proxy='your_proxy' --build-arg https_proxy='your_proxy' -t anonymization_api -f ./docker/dockerfile .\n```\n\n## Run the docker container\n\nTo run the API, go to the API's directory and run the following:\n\n#### Using Linux based docker:\n\n```sh\nsudo docker run -itv $(pwd)/src/main:/main -v $(pwd)/jsonFiles:/jsonFiles -p <port_of_your_choice>:4343 anonymization_api\n```\n##### Behind a proxy:\n```sh\nsudo docker run -itv $(pwd)/src/main:/main -v $(pwd)/jsonFiles:/jsonFiles  --env HTTP_PROXY=\"\" --env HTTPS_PROXY=\"\" --env http_proxy=\"\" --env https_proxy=\"\" -p 5555:4343 anonymization_api\n```\n\n#### Using Windows based docker:\n\n```sh\ndocker run -itv ${PWD}/src/main:/main -v ${PWD}/jsonFiles:/jsonFiles -p <port_of_your_choice>:4343 anonymization_api\n```\n\nThe API file will be run automatically, and the service will listen to http requests on the chosen port.\n\n## API Endpoints\n\nTo see all available endpoints, open your favorite browser and navigate to:\n\n```\nhttp://<machine_IP>:<docker_host_port>/docs\n```\n\n### Endpoints summary\n![](references/endpoints.png) \n\n#### Configuration \n\n\n##### /set_url (POST)\n\nSet the URL of the inference API that you wish to connect to the Anonymization API. 
If the specified URL is unreachable due to connection problems, it will not be added to the [JSON url_configuration file](https://github.com/BMW-InnovationLab/BMW-Anonymization-API/blob/master/jsonFiles/url_configuration.json). The URL should be specified in the following format  \"http://ip:port/\".\n\n##### /list_urls (GET)\n\nReturns the URLs of the inference APIs that were already configured via the /set_url POST request.\n\n##### /remove_url (POST)\n\nRemoves the specified URL from the [JSON url_configuration file](https://github.com/BMW-InnovationLab/BMW-Anonymization-API/blob/master/jsonFiles/url_configuration.json)\n\n##### /remove_all_urls (POST)\n\nRemoves all available urls from the [JSON url_configuration file](https://github.com/BMW-InnovationLab/BMW-Anonymization-API/blob/master/jsonFiles/url_configuration.json)\n\n##### /available_methods/ (GET)\n\nAfter setting the inference URLs via the /set_url request, the user can view the Anonymization API's configuration by issuing the /available_methods request. 
Mainly the user can view (i) the supported sensitive information (label_names), (ii) the supported localization techniques, (iii) the inference URLs and (iv) the DL model name that are configured in the deployed anonymization API as seen below.\n\n\n\n\n<img alt=\"\" src=\"./references/available_methods.gif?raw=\" width=\"800\" >\n\n\n\n\n#### Anonymization \n##### /anonymize/ (POST)\n\nAnonymizes the input image based on the [user's JSON configuration file](https://github.com/BMW-InnovationLab/BMW-Anonymization-API/blob/master/jsonFiles/user_configuration.json)\n\n\n\n<img alt=\"\" src=\"./references/anonymize.gif?raw=\" width=\"800\" >\n\n\n\n\n##### /anonymize_video/ (POST)\n\nAnonymizes a video based on the user's sensitive info and saves the anonymized video in `src/main/anonymized_videos` under <original_video_name>_TIMESTAMP.mp4\n\n\n\n<img src=\"./references/anonymize_video.gif?raw=\" width=\"800\" >\n\n\n#### Video Anonymization Time\nThe video might take a while; actually, you can estimate the time that it may take by using the following formula:\n**Video_Anonymization_Time = Video_Length x Number_Of_Frames_Per_Second  x Anonymization_Time_Of_Each_Frame**\n\n\n## User configuration file sample\n\nIn order to anonymize an image, the user should specify the different details in the [user's JSON configuration file](https://github.com/BMW-InnovationLab/BMW-Anonymization-API/blob/master/jsonFiles/user_configuration.json)\n\nPlease check a sample in the below image:\n\n![](references/json_file.PNG)\n\nNote that the URL field is an optional field that you can add in case you wanted to use a specific URL of a running API. You can just add the URL as an optional field in this file as shown in the first sensitive info. In case this field is not specified, the URL defined in the url_configuration.json file will be used by default if it matches all the requirements. 
\n\n## To add a new technique to the API:\nPlease refer to the following link [add new technique documentation](references/techniques.md) for more information on how to add a new anonymization technique to the APIs with common and custom labels.\n\n## Benchmark\n\n### Object Detection\n\n|**GPU**|**Network**  |**Width**  |**Height**  |**Inference Time (s)**  |**Anonymization Time (s)** |**Total Time (s)** |\n|:-:|:-:|:-:|:-:|:-:|:-:|:-:|\n|Titan RTX  |  yolov4 |  640 | 768| 0.2  |0.07  |0.27 |\n|Titan RTX  |  yolov4 |  1024 | 768| 0.4  |0.14  |0.54 |\n|Titan RTX  |  yolov4 |  2048 | 1024| 1.2  |0.6  |1.8 |\n|Titan RTX  |  yolov4 |  3840 | 2160| 4.8  |0.6  |5.4 |\n\n\n\n### Object Detection with OpenVINO model and Intel Core i7-1185G7 \n\nThe model was trained with the TensorFlow Object Detection API (TF version 1.14) and then converted to OpenVINO IR using [Intel&reg; OpenVINO&trade; toolkit v2021.4](https://docs.openvinotoolkit.org/latest/index.html) </br>\n<span style=\"font-size:2em;\">Results may vary. For workloads and configurations visit: www.intel.com/PerformanceIndex and Legal Information. 
</span>\n\n|**CPU**|**Network**  |**Precision** |**Width**  |**Height**  |**Inference Time (s)**  |**Anonymization Time (s)** |**Total Time (s)** <br/> for Avg, Max, Min|\n|:-:|:-:|:-:|:-:|:-:|:-:|:-:|:-:|\n|Intel Core <br/> i7-1185G7  |  Faster R-CNN  <br/> Input Shape: [3,600,600]  |  FP32 | 1024| 768  |0.51  |0.09  |0.60, 0.67, 0.54  |\n|Intel Core <br/> i7-1185G7  |  Faster R-CNN  <br/> Input Shape: [3,600,600] |  FP32  | 2048 | 1536  |0.56  |0.24  |0.80,  0.97, 0.70  |\n|Intel Core <br/> i7-1185G7  |  Faster R-CNN  <br/> Input Shape: [3,600,600]  |  INT8 | 1024| 768  |0.16  |0.09  |0.25, 0.27, 0.22  |\n|Intel Core <br/> i7-1185G7  |  Faster R-CNN  <br/> Input Shape: [3,600,600] |  INT8| 2048 | 1536  |0.19  |0.24  |0.43,  0.56, 0.36  |\n\n\n### Semantic Segmentation\n\n|**GPU**|**Network**  |**Width**  |**Height**  |**Inference Time (s)**  |**Anonymization Time (s)** |**Total Time (s)** |\n|:-:|:-:|:-:|:-:|:-:|:-:|:-:|\n|Titan RTX  |  psp resnet 101 |  640 | 768| 0.2  |0.8 |1.1 |\n|Titan RTX  |  psp resnet 101 |  1024 | 768| 0.3  |0.8  |1.1 |\n|Titan RTX  |  psp resnet 101 |  2048 | 1024| 0.9  |1.0  |1.9 |\n|Titan RTX  |  psp resnet 101 |  3840 | 2160| 2.0  |3.0  |5.0 |\n\n\n\n## Possible Error\n\n- You may encounter the below error when running the docker container at startup in standalone version or docker-compose version ![url_error](references/url_error.png) \n- In case you do, please make sure that the URLs of the inference APIs listed in the `jsonFiles/url_configuration.json` are still reachable. 
A possible solution would be to empty `jsonFiles/url_configuration.json` as seen below before starting the container:\n\n    ```\n    {\n    \"urls\": [\n    ]\n    }\n    ```\n\n## Citing\n\nIf you use this repository in your research, consider citing it using the following Bibtex entries:\n```\n@inproceedings{Tekli2021DesigningAE,\n  title={Designing and evaluating anonymization techniques for images and relational data streams via Machine Learning approaches at BMW Group. (Conception et {\\'e}valuation de techniques d'anonymisation des images et des flux de donn{\\'e}es relationnels via des approches d'apprentissage automatique {\\`a} BMW Group)},\n  author={Jimmy Tekli},\n  year={2021},\n  url={https://api.semanticscholar.org/CorpusID:266756928}\n}\n```\nand \n```\n@misc{bmwanotool,\n  author = {BMW TechOffice MUNICH},\n  title = {BMW Anonymization Tool},\n  howpublished = {\\url{https://github.com/BMW-InnovationLab/BMW-Anonymization-API}},\n  year = {2019},\n}\n```\n\n\n\n## Acknowledgments\n\nGhenwa Aoun\n\nAntoine Charbel, [inmind.ai](https://inmind.ai/), Beirut, Lebanon\n\nRoy Anwar\n\nFady Dib\n\nJimmy Tekli, BMW Innovation Lab, Munich, Germany\n\n[OpenVINO Toolkit](https://github.com/openvinotoolkit)\n\n[intel.com](https://intel.com)\n\n[robotron.de](https://www.robotron.de)\n"
  },
  {
    "path": "docker/dockerfile",
    "content": "FROM python:3.7\n\nCOPY docker/requirements.txt .\nCOPY src/main /main\n\nRUN apt-get update && apt-get install -y ffmpeg \\\n    libsm6 \\\n    libxext6\n\nRUN python -m pip install --upgrade pip \nRUN pip install -r requirements.txt\n\nWORKDIR /main\n\nCMD [\"uvicorn\", \"start:app\", \"--host\", \"0.0.0.0\", \"--port\", \"4343\"]\n"
  },
  {
    "path": "docker/requirements.txt",
    "content": "moviepy==1.0.3\naiofiles==0.8.0\nfastapi==0.70.1\nopencv-python==4.5.4.60\njsonschema==3.2.0\nnumpy==1.21.0\npython-multipart==0.0.5\nuvicorn==0.16.0\nPillow==9.0.0\nrequests==2.26.0\n"
  },
  {
    "path": "docker-compose.yml",
    "content": "version: '3'\nservices:\n\n  openvino_detection_api:\n    build: \n      context: ../BMW-IntelOpenVINO-Detection-Inference-API\n      dockerfile: Dockerfile\n    image: openvino_detection.api:latest\n    networks:\n      - anonym-net\n    ports:\n      - \"8081:80\"\n    volumes:\n      - \"../BMW-IntelOpenVINO-Detection-Inference-API/models:/models\"\n      - \"../BMW-IntelOpenVINO-Detection-Inference-API/models_hash:/models_hash\"\n\n  openvino_segmentation_api:\n    build: \n      context: ../BMW-IntelOpenVINO-Segmentation-Inference-API\n      dockerfile: docker/Dockerfile\n    image: openvino_segmentation.api:latest\n    networks:\n      - anonym-net\n    ports:\n      - \"8090:80\"\n    volumes:\n      - \"../BMW-IntelOpenVINO-Segmentation-Inference-API/models:/models\"\n      - \"../BMW-IntelOpenVINO-Segmentation-Inference-API/models_hash:/models_hash\"\n      \n  anonymization_api:\n    image: anonymize.api:latest\n    build: \n      context: .\n      dockerfile: docker/dockerfile\n    networks:\n      - anonym-net\n    ports:\n      - \"8070:4343\"\n    volumes:\n      - \"./jsonFiles:/jsonFiles\"\n      - \"./src/main/anonymized_video/:/main/anonymized_video\"\n    depends_on:\n      - openvino_detection_api\n      - openvino_segmentation_api\n\nnetworks: \n  anonym-net:\n"
  },
  {
    "path": "docker-compose_tf_gluoncv.yml",
    "content": "version: \"2.3\"\nservices:\n  detection_api:\n    image: tensorflow_inference_api_cpu:latest\n    build:\n      context: ../BMW-TensorFlow-Inference-API-CPU\n      dockerfile: docker/dockerfile\n    volumes:\n      - ../BMW-TensorFlow-Inference-API-CPU/models:/models\n      - ../BMW-TensorFlow-Inference-API-CPU/models_hash:/models_hash\n    ports:\n      - \"9998:4343\"\n      \n  segmentation_api:\n    image: gluoncv_segmentation_inference_api_cpu:latest\n    build:\n      context: ../BMW-Semantic-Segmentation-Inference-API-GPU-CPU\n      dockerfile: CPU/dockerfile\n     \n    volumes:\n      - ../BMW-Semantic-Segmentation-Inference-API-GPU-CPU/models:/models\n      - ../BMW-Semantic-Segmentation-Inference-API-GPU-CPU/models_hash:/models_hash\n    runtime: nvidia\n    ports:\n      - \"9999:4343\"\n    environment:\n      - NVIDIA_VISIBLE_DEVICES=1\n      \n  anonymization:\n    image: anonymization_api:latest \n    build:\n      context: .\n      dockerfile: docker/dockerfile\n    volumes:\n      - ./jsonFiles:/jsonFiles\n      - ./src/main/anonymized_video:/main/anonymized_video\n    ports:\n      - \"9997:4343\"\n    depends_on:\n      - detection_api\n      - segmentation_api\n"
  },
  {
    "path": "docker_compose_readme.md",
    "content": "# Deploying the BMW-Anonymization-Api with docker compose\n\nIn this section, docker compose will build and run a network of containers including the Anonymization API alongside multiple inference APIs.\n\nIn the following section, we encapsulate the [BMW-IntelOpenVINO-Inference-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Inference-API) and the [BMW-IntelOpenVINO-Segmentation-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Segmentation-Inference-API) with our anonymization API. \n\nThese two inference APIs contain example models optimized via OpenVINO. Other OpenVINO models in Intermediate Representation (IR) format, converted via the [Intel&reg; OpenVINO&trade; toolkit v2021.1](https://docs.openvinotoolkit.org/latest/index.html), can be deployed with our APIs. Currently, OpenVINO supports conversion for DL-based models trained via several Machine Learning frameworks including Caffe, Tensorflow etc. Please refer to [the OpenVINO documentation](https://docs.openvinotoolkit.org/2021.1/openvino_docs_MO_DG_prepare_model_convert_model_Converting_Model.html) for further details on converting your Model.\n\n\n## Build and Run the network\n\nIn this section, docker compose will build and run a network of containers including the Anonymization API alongside the OpenVINO inference APIs for detection and segmentation. The instructions are provided below. 
\n\nTo run the APIs together, clone the [BMW-Anonymization-API](https://github.com/BMW-InnovationLab/BMW-Anonymization-API), the [BMW-IntelOpenVINO-Inference-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Inference-API) and the [BMW-IntelOpenVINO-Segmentation-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Segmentation-Inference-API) into the same directory.\n\nThe folder structure should be similar to as shown below:\n\n```shell\n│──BMW-Anonymization-API\n  │──docker \n  |──jsonFiles  \n  │──...\n  |──docker-compose.yml  \n  │──Readme.md  \n│──BMW-IntelOpenVINO-Segmentation-API \n  │──docker \n  |──...\n  │──docs  \n  │──Readme.md\n│──BMW-IntelOpenVINO-Detection-Inference-API\n  │──docker \n  |──...\n  │──docs  \n  │──Readme.md\n  \n```\n\nIn the BMW-Anonymization API replace the `./BMW-Anonymization-API/jsonFiles/url_configuration.json` with the provided `./url_for_openvino_compose/url_configuration.json`.\n\nThree services are configured in the `docker-compose.yml` file in this repository: the [BMW-Anonymization-API](https://github.com/BMW-InnovationLab/BMW-Anonymization-API), the [BMW-IntelOpenVINO-Inference-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Inference-API) and the [BMW-IntelOpenVINO-Segmentation-API](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Segmentation-Inference-API). You can modify the build context to specify the base directories of the APIs (ensure the correct path is also given for the mounted volumes). You can also modify the host ports you wish to use for the APIs. 
\n\nAfter you configure your docker-compose.yml file, you can run the following command in the anonymization API directory:\n\n### Build the images\nTo build the images, run the following command in this directory:\n```sh\ndocker-compose build\n```\n\n### Run the network\nTo run the network, use the following command in this directory:\n```sh\ndocker-compose up\n```\n\n### Stop the running containers\nTo stop the network, run the following command in this directory:\n```sh\ndocker-compose down\n```\n\n### Restart the network\nTo restart the network, run the following command in this directory:\n```sh\ndocker-compose restart\n```\n\n## API Endpoints\n\nTo see all available endpoints, open your favorite browser and navigate to:\n```\nhttp://<machine_IP>:<docker_host_port>/docs\n```\nIf you use the standard configuration of the `docker-compose.yml` the following endpoints are available:\n| API | Endpoint |\n| ------------------ | ------------------ |\n| BMW-Anonymization-API | http://localhost:8070/docs |\n| RCV-IntelOpenVINO-Detection-API | http://localhost:8081/docs |\n| BMW-IntelOpenVINO-Segmentation-API | http://localhost:8090/docs |\n\n**Please refer to the Endpoints Summary section in the [initial readme](https://github.com/BMW-InnovationLab/Anonymization_API/tree/priority-3)**\n\n## Using other inference APIs\n\nOther inference APIs can also be configured within the docker-compose.yml such as our [tensorflow CPU detection API](https://github.com/BMW-InnovationLab/BMW-TensorFlow-Inference-API-CPU) and [semantic segmentation CPU/GPU](https://github.com/BMW-InnovationLab/BMW-IntelOpenVINO-Segmentation-Inference-API).\nIf you wish to deploy other inference APIs, please make sure to adjust the docker-compose.yml accordingly:\n- Modify the context in order to specify the base directory of each API\n- Modify the dockerfile entry to match the path of the dockerfile in the API directory \n- Modify the ports and choose the ones you wish to use for each API\n- In case you are 
setting up a GPU-based inference API, do not forget to set the runtime entry as \"nvidia\" \n\nWe provide a sample docker-compose file `./BMW-Anonymization-API/docker-compose_tf_gluoncv.yml`\n\n"
  },
  {
    "path": "jsonFiles/url_configuration.json",
    "content": "{\n  \"urls\": [\n  ]\n}\n"
  },
  {
    "path": "jsonFiles/user_configuration.json",
    "content": "{\n  \"sensitive_info\": [\n    {\n      \"model_name\": \"sample_model\",\n      \"class_name\": \"person\",\n      \"anonymization_technique\": \"blackening\",\n      \"inference_type\": \"segmentation\",\n      \"anonymization_degree\": 1\n    }\n  ]\n}\n"
  },
  {
    "path": "references/techniques.md",
    "content": "# Add a new technique to the API\n\nIt is mandatory that the techniques you are adding are actually implemented.\nThese are the steps that should be applied so that the anonymization technique you are adding can be applicable:\n- Go to \"src/main/anonymization/base_anonymization.py\"\n- Add the signature of the method similarly to what is already implemented (name + parameters); this method will be overridden in the other files.\n- Now this method should be implemented in the files that are specified in the \"/src/main/anonymization\" directory (except base_anonymization.py)\n  These files, for example detection_anonymization.py and segmentation_anonymization.py consist of two different classes both extending the BaseAnonymization class.\n\n## Types of labels\n\nThis API contains two types of labels: \n\n- Common labels\n- Special labels\n\n## Adding common labels\n\nCommon labels are the ones that support common techniques (techniques that can be applied to all labels such as blurring, pixelating and blackening in our case)\nSpecial labels are the ones that support, in addition to the common labels techniques, a specific technique that should be specified.\nBased on the above, add the following after implementing the technique:\n\nIf you want to add a common technique (for common labels; that can be applied to all labels):\n\nJust go to \"/src/main/supported_methods/common_labels.py\" and add the name of the technique as an attribute to the CommonLabels class.\nFor example:\n\n![](./common_technique.PNG)\n\nAll the labels will automatically support the newly added technique if it is actually implemented as mentioned above.\n\n## Adding special labels\n\nIf you want to add a special technique (for a special label; that can be applied only to this label):\n- Go to \"/src/main/supported_methods\"\n- Create a new python file whose name is the name of the special label\n- This file should contain a class that represents the special label and this class 
will extend the CommonLabels class\n- Add the special technique as an attribute\n- For example if we want to add the faceswap technique that can only be applied on the face label we should create a face.py file that should look like the following:\n\n![](./special_technique.PNG)\n\nThis way, the face label will support all the common techniques in addition to the special one that will only be applied on it (faceswap) if the faceswap technique is correctly implemented."
  },
  {
    "path": "src/main/APIClient.py",
    "content": "import os\nimport time\nimport io\nimport sys\nimport json\nimport requests\nimport jsonschema\nfrom exceptions import InvalidUrlConfiguration, ApplicationError\n\n\nclass ApiClient:\n    def __init__(self):\n        self.configuration = []\n        self.url_list = self.get_url_configuration()\n        self.get_api_configuration()\n\n    def get_configuration(self):\n        try:\n            return self.configuration\n        except ApplicationError as e:\n            raise e\n\n    @staticmethod\n    def get_url_configuration():\n        \"\"\"\n        :return: List of all the api urls provided in the url_configuration file\n        \"\"\"\n        with open('../jsonFiles/url_configuration.json') as f:\n            data = json.load(f)\n            urls = data[\"urls\"]\n            try:\n                validate_url_configuration(data)\n            except Exception as e:\n                raise InvalidUrlConfiguration\n            return urls\n\n    def get_api_configuration(self):\n        for url in self.url_list:\n            self.get_models(url)\n\n    @staticmethod\n    def get_model_names(url: str):\n        time.sleep(5)\n        response = requests.get(\n            url=url + \"models\")\n        models_list = response.json()[\"data\"][\"models\"]\n        return models_list\n\n    def get_models(self, url: str):\n        \"\"\"\n        Returns a list of json objects representing the configuration of each api\n        corresponding to each url in the url_configuration file\n        :param url: Each url in the url_configuration file\n        :return: List of json objects\n        \"\"\"\n        models_list = self.get_model_names(url)\n        for model_name in models_list:\n            labels_list = self.get_labels(url, model_name)\n            model_type = self.get_model_configuration(url, model_name)\n            palette = None\n            if \"segmentation\" in model_type:\n                palette = self.get_palette(url, model_name)\n 
           self.configuration.append({\n                \"name\": model_name,\n                \"labels\": labels_list,\n                \"type\": model_type,\n                \"url\": url,\n                \"palette\": palette\n            })\n\n    @staticmethod\n    def get_palette(url: str, model_name: str):\n        response = requests.get(\n            url=url + \"models/\" + model_name + \"/palette\"\n        )\n        return response.json()[\"data\"]\n\n    @staticmethod\n    def get_labels(url: str, model_name: str):\n        response = requests.get(\n            url=url + \"models/\" + model_name + \"/labels\"\n        )\n        return response.json()[\"data\"]\n\n    @staticmethod\n    def get_model_configuration(url: str, model_name: str):\n        response = requests.get(\n            url=url + \"models/\" + model_name + \"/config\"\n        )\n        return response.json()[\"data\"][\"type\"]\n\n    @staticmethod\n    def get_detection_response(url: str, model_name: str, im):\n        response = requests.post(\n            url=url + \"models/\" + model_name + \"/predict\",\n            files={'input_data': io.BytesIO(im.tobytes())})\n        return response.json()\n\n    @staticmethod\n    def get_segmentation_response(url: str, model_name: str, im):\n        response = requests.post(\n            url=url + \"models/\" + model_name + \"/inference\",\n            files={'input_data': io.BytesIO(im.tobytes())}\n        )\n        return response\n\n\ndef validate_url_configuration(data):\n    \"\"\"\n    Validate the url_configuration file by comparing it to the urlConfigurationSchema\n    :param data: The data from the url_configuration file\n    \"\"\"\n    with open('urlConfigurationSchema') as f:\n        schema = json.load(f)\n    try:\n        jsonschema.validate(data, schema)\n    except Exception as e:\n        raise InvalidUrlConfiguration(e)\n"
  },
  {
    "path": "src/main/ConfigurationSchema.json",
    "content": "{\n    \"type\": \"object\",\n    \"properties\": \n    {\n      \"sensitive_info\": \n      {\n        \"type\": \"array\",\n        \"items\": [\n          {\n            \"type\": \"object\",\n            \"properties\": \n            {\n              \"model_name\": \n              {\n                \"type\": \"string\"\n              },\n              \"class_name\": \n              {\n                \"type\": \"string\"\n              },\n              \"anonymization_technique\": \n              {\n                \"type\": \"string\"\n              },\n              \"inference_type\": \n              {\n                \"type\": \"string\"\n              },\n              \"anonymization_degree\": \n              {\n                \"type\": \"number\",\n                \"minimum\": 0,\n                \"maximum\": 1\n              }\n            },\n            \"required\": [\n              \"model_name\",\n              \"class_name\",\n              \"anonymization_technique\",\n              \"inference_type\",\n              \"anonymization_degree\"\n            ]\n          }\n        ]\n      }\n    },\n    \"required\": [\n      \"sensitive_info\"\n    ]\n}  "
  },
  {
    "path": "src/main/__init__.py",
    "content": ""
  },
  {
    "path": "src/main/anonymization/__init__.py",
    "content": ""
  },
  {
    "path": "src/main/anonymization/base_anonymization.py",
    "content": "from abc import ABC, abstractmethod\n\n\nclass BaseAnonymization(ABC):\n    \"\"\"\n    Base anonymization class for the detection and the semantic anonymization\n    \"\"\"\n    @abstractmethod\n    def blurring(self, image, response, degree=None, id=None, mask=None):\n        pass\n\n    @abstractmethod\n    def pixelating(self, image, response, degree=None, id=None, mask=None):\n        pass\n\n    @abstractmethod\n    def blackening(self, image, response, degree=None, id=None, mask=None):\n        pass\n"
  },
  {
    "path": "src/main/anonymization/detection_anonymization.py",
    "content": "from anonymization.base_anonymization import BaseAnonymization\nfrom PIL import ImageFilter, Image\n\n\ndef find_boxes(bbox):\n    nb = []\n    for i in bbox:\n        nb.append(i)\n    return nb\n\n\nclass DetectionAnonymization(BaseAnonymization):\n    def __init__(self):\n        pass\n\n    def blurring(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n        Blur the detected objects based on the user's requirements\n        :param image: input image\n        :param response: The response parsed from the object detection api\n        :param degree: The degree of the anonymization (specified in the user_configuration file)\n        :param id:\n        :param mask:\n        :return: The anonymized image\n        \"\"\"\n        boxes = find_boxes(response)\n        for i in boxes:\n            cropped_image = image.crop((i[0], i[1], i[2], i[3]))\n            blurred_image = cropped_image.filter(ImageFilter.GaussianBlur(25*float(degree)))\n            image.paste(blurred_image, (i[0], i[1], i[2], i[3]))\n        return image\n\n    def pixelating(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n        Pixelate the detected objects based on the user's requirements\n        :param image: input image\n        :param response: The response parsed from the object detection api\n        :param degree: The degree of the anonymization (specified in the user_configuration file)\n        :param id:\n        :param mask:\n        :return: The anonymized image\n        \"\"\"\n        boxes = find_boxes(response)\n        for i in boxes:\n            cropped_image = image.crop((i[0], i[1], i[2], i[3]))\n            w, h = cropped_image.size\n            small = cropped_image.resize((int(w / (float(degree) * w)), int(h / (float(degree) * h))), Image.BILINEAR)\n            result = small.resize(cropped_image.size, Image.NEAREST)\n            image.paste(result, (i[0], i[1], i[2], i[3]))\n        return 
image\n\n    def blackening(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n        Blacken the detected objects based on the user's requirements\n        :param image: input image\n        :param response: The response parsed from the object detection api\n        :param degree: The degree of the anonymization (specified in the user_configuration file)\n        :param id:\n        :param mask:\n        :return: The anonymized image\n        \"\"\"\n        boxes = find_boxes(response)\n        for i in boxes:\n            cropped = image.crop((i[0], i[1], i[2], i[3]))\n            h, w = cropped.size\n            black = Image.new(str(image.mode), (h, w), 'black')\n            result = Image.blend(cropped, black, float(degree))\n            cropped.paste(result)\n            image.paste(cropped, (i[0], i[1], i[2], i[3]))\n        return image\n"
  },
  {
    "path": "src/main/anonymization/segmentation_anonymization.py",
    "content": "from anonymization.base_anonymization import BaseAnonymization\nimport os\nfrom PIL import ImageFilter, Image\nimport numpy as np\nimport cv2\nimport io\n\nclass SegmentationAnonymization(BaseAnonymization):\n    def __init__(self):\n        pass\n\n    def blurring(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n        Blur the segmented objects based on the user's requirements\n        :param image: input image\n        :param response: The response parsed from the semantic segmentation api\n        :param degree: The degree of the anonymization (specified in the user_configuration file)\n        :param id: The id of the segmented class\n        :param mask: The mask we will apply the anonymization on\n        :return: The anonymized image\n        \"\"\"\n        cropped = image.crop((response[0], response[1], response[2], response[3]))\n        blurred = cropped.filter(ImageFilter.GaussianBlur(25 * float(degree)))\n        mask = Image.open(io.BytesIO(mask.content))\n        img_array = np.array(mask)\n        img=Image.fromarray(img_array)\n        im = img.crop((response[0], response[1], response[2], response[3]))\n        rgb_image=np.array(im.convert(mode=\"RGB\"))\n        src=cv2.cvtColor( rgb_image, cv2.COLOR_RGB2BGR)\n        tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n        ex1=cv2.inRange(tmp,int(id),int(id))\n        ex, alpha = cv2.threshold(ex1, 0, 255, cv2.THRESH_BINARY)\n        b, g, r = cv2.split(src)\n        rgba = [b, g, r, alpha]\n        dst = cv2.merge(rgba, 4)\n        test=Image.fromarray(np.array(dst))\n        image.paste(blurred, (response[0], response[1], response[2], response[3]), mask=test)\n        return image\n\n    def pixelating(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n         Pixelate the segmented objects based on the user's requirements\n         :param image: input image\n         :param response: The response parsed from the semantic 
segmentation api\n         :param degree: The degree of the anonymization (specified in the user_configuration file)\n         :param id: The id of the segmented class\n         :param mask: The mask we will apply the anonymization on\n         :return: The anonymized image\n         \"\"\"\n        cropped = image.crop((response[0], response[1], response[2], response[3]))\n        mask = Image.open(io.BytesIO(mask.content))\n        img_array = np.array(mask)\n        img=Image.fromarray(img_array)\n        im = img.crop((response[0], response[1], response[2], response[3]))\n        rgb_image=np.array(im.convert(mode=\"RGB\"))\n        src=cv2.cvtColor( rgb_image, cv2.COLOR_RGB2BGR)\n        tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n        ex1=cv2.inRange(tmp,int(id),int(id))\n        ex, alpha = cv2.threshold(ex1, 0, 255, cv2.THRESH_BINARY)\n        b, g, r = cv2.split(src)\n        rgba = [b, g, r, alpha]\n        dst = cv2.merge(rgba, 4)\n        test=Image.fromarray(np.array(dst))\n        w, h = cropped.size\n        small = cropped.resize((int(w / (float(degree) * w)), int(h / (float(degree) * h))), Image.BILINEAR)\n        result = small.resize(cropped.size, Image.NEAREST)\n        image.paste(result, (response[0], response[1], response[2], response[3]), mask=test)\n        return image\n\n    def blackening(self, image, response, degree=None, id=None, mask=None):\n        \"\"\"\n         Blacken the segmented objects based on the user's requirements\n         :param image: input image\n         :param response: The response parsed from the semantic segmentation api\n         :param degree: The degree of the anonymization (specified in the user_configuration file)\n         :param id: The id of the segmented class\n         :param mask: The mask we will apply the anonymization on\n         :return: The anonymized image\n         \"\"\"\n        cropped = image.crop((response[0], response[1], response[2], response[3]))\n        mask = 
Image.open(io.BytesIO(mask.content))\n        img_array = np.array(mask)\n        img=Image.fromarray(img_array)\n        im = img.crop((response[0], response[1], response[2], response[3]))\n        rgb_image=np.array(im.convert(mode=\"RGB\"))\n        src=cv2.cvtColor( rgb_image, cv2.COLOR_RGB2BGR)\n        tmp = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n        ex1=cv2.inRange(tmp,int(id),int(id))\n        ex, alpha = cv2.threshold(ex1, 0, 255, cv2.THRESH_BINARY)\n        b, g, r = cv2.split(src)\n        rgba = [b, g, r, alpha]\n        dst = cv2.merge(rgba, 4)\n        test=Image.fromarray(np.array(dst))\n        h, w = cropped.size\n        black = Image.new(str(image.mode), (h, w), 'black')\n        result = Image.blend(cropped, black, float(degree))\n        cropped.paste(result)\n        image.paste(cropped, (response[0], response[1], response[2], response[3]), mask=test)\n        return image\n"
  },
  {
    "path": "src/main/anonymization_service.py",
    "content": "import io\nimport os\nimport cv2\nimport sys\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nfrom datetime import datetime\nfrom APIClient import ApiClient\nfrom fastapi import File, UploadFile\nfrom exceptions import ApplicationError, InvalidInputData\nfrom strategy_context import StrategyContext\nfrom helpers import get_user_models\nimport helpers\nimport moviepy.editor\n\nsys.path.append(\"anonymization\")\n\n\nclass AnonymizationService:\n\n    def __init__(self):\n        self.strategy_context = StrategyContext()\n\n    def anonymize(self, image: UploadFile = File(...), configuration: UploadFile = File(...)):\n        \"\"\"\n        Calls the correct anonymization method based on the model type and the technique\n        :param image: Input image\n        :param configuration: user configuration file\n        :return: File response representing the anonymized image\n        \"\"\"\n        result = None\n        im = Image.open(image.file).convert('RGB')\n        rgb_image_0 = np.array(im)\n        bgr_image_0 = cv2.cvtColor(rgb_image_0, cv2.COLOR_RGB2BGR)\n        response = []\n        configuration_path = '../jsonFiles/user_configuration.json'\n\n        with open(configuration_path, 'wb') as config:\n            config.write(configuration.file.read())\n        try:\n            users_models = get_user_models(configuration_path)\n        except ApplicationError as e:\n            raise e\n        _, im_png = cv2.imencode(\".png\", bgr_image_0)\n        errors = []\n        i = 0\n        for each in users_models:\n            i = i + 1\n            try:\n                response, mask = getattr(helpers, \"parse_\" + each[\"model_type\"] + \"_response\")(each, im_png, i, errors)\n            except Exception as e:\n                errors.append(\n                    \"The model type <\" + each[\"model_type\"] + \"> in sensitive info <\" + str(i) + \"> is not supported.\")\n            if response:\n                if not 
errors:\n                    for r in response:\n                        inference_type = r['type']\n                        technique = r['technique']\n                        box = r['boxes']\n                        degree = r['degree']\n                        label_id = r['label_id']\n                        anonymization_name = inference_type + \"_anonymization\"\n                        anonymization_class = anonymization_name.title().replace(\"_\", \"\")\n                        try:\n                            result = self.strategy_context.anonymize(\n                                getattr(__import__(anonymization_name), anonymization_class)(),\n                                technique=technique, image=im, response=box,\n                                degree=degree, label_id=label_id, mask=mask)\n                        except ApplicationError as e:\n                            raise e\n                    rgb_image = np.array(result)\n                    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n                else:\n                    bgr_image = None\n            else:\n                rgb_image = np.array(im)\n                bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n        return bgr_image, errors\n\n    def anonymize_video(self, video: UploadFile = File(...), configuration: UploadFile = File(...)):\n        result = None\n        configuration_path = '../jsonFiles/user_configuration.json'\n        with open(configuration_path, 'wb') as config:\n            config.write(configuration.file.read())\n        try:\n            users_models = get_user_models(configuration_path)\n        except ApplicationError as e:\n            raise e\n        response = []\n        with open('video.mp4', 'wb') as v:\n            try:\n                v.write(video.file.read())\n            except Exception as e:\n                raise InvalidInputData(e)\n            initial_video = moviepy.editor.VideoFileClip(\"video.mp4\")\n        
    initial_video_audio = initial_video.audio\n            cap = cv2.VideoCapture('video.mp4')\n            fps = cap.get(cv2.CAP_PROP_FPS)\n            i = 0\n            while cap.isOpened():\n                ret, frame = cap.read()\n                if ret is True:\n                    im = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n                    img = Image.fromarray(im)\n                    bgr_image_0 = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)\n                    _, im_png = cv2.imencode(\".png\", bgr_image_0)\n                    path = os.getcwd() + \"/frames/\" + str(i) + \".jpg\"\n                    i = i + 1\n                    errors = []\n                    n = 0\n                    for each in users_models:\n                        n = n + 1\n                        result = None\n                        try:\n                            response, mask = getattr(helpers, \"parse_\" + each[\"model_type\"] + \"_response\")(each, im_png,\n                                                                                                           n, errors)\n                        except Exception as e:\n                            errors.append(\"The model type <\" + each[\"model_type\"] + \"> in sensitive info <\" + str(\n                                n) + \"> is not supported.\")\n                        if response:\n                            if not errors:\n                                for r in response:\n                                    inference_type = r['type']\n                                    technique = r['technique']\n                                    box = r['boxes']\n                                    degree = r['degree']\n                                    label_id = r['label_id']\n                                    anonymization_name = inference_type + \"_anonymization\"\n                                    anonymization_class = anonymization_name.title().replace(\"_\", \"\")\n                                    try:\n   
                                     result = self.strategy_context.anonymize(\n                                            getattr(__import__(anonymization_name), anonymization_class)(),\n                                            technique=technique, image=img, response=box,\n                                            degree=degree, label_id=label_id, mask=mask)\n                                    except ApplicationError as e:\n                                        raise e\n                                result.save(path)\n                            else:\n                                return errors\n                        else:\n                            img.save(path)\n                else:\n                    break\n            print(\"Processing ...\")\n            images = [img for img in os.listdir(os.getcwd() + \"/frames\") if img.endswith(\".jpg\")]\n            sort = []\n            for frame in images:\n                name = frame.split(\".\")[0]\n                image_number = int(name)\n                sort.append(image_number)\n            sort = sorted(sort)\n            frame = cv2.imread(os.path.join(os.getcwd() + \"/frames\", str(sort[0]) + \".jpg\"))\n            height, width, layers = frame.shape\n            fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n            output_video_dir = \"Anonymized_\" + video.filename.split(\".\")[0] + \"_\" + datetime.now().strftime(\n                \"%d_%m_%y_%H_%M_%S\")\n            video = cv2.VideoWriter('anonymized_video/' + output_video_dir + '.mp4', fourcc, fps, (width, height))\n            for image in sort:\n                video.write(cv2.imread(os.path.join(os.getcwd() + \"/frames\", str(image) + \".jpg\")))\n                os.remove(os.getcwd() + \"/frames/\" + str(image) + \".jpg\")\n            cv2.destroyAllWindows()\n            video.release()\n            anonymized_video = moviepy.editor.VideoFileClip('anonymized_video/' + output_video_dir + '.mp4')\n            
anonymized_video.audio = initial_video_audio\n            os.remove('anonymized_video/' + output_video_dir + '.mp4')\n            anonymized_video.write_videofile('anonymized_video/' + output_video_dir + '.mp4')\n            os.remove(os.getcwd() + \"/video.mp4\")\n            return \"Done\"\n"
  },
  {
    "path": "src/main/anonymized_video/.gitignore",
    "content": "# Ignore everything in this directory\n*\n# Except this file\n!.gitignore\n"
  },
  {
    "path": "src/main/config.py",
    "content": "master_dict={}\n"
  },
  {
    "path": "src/main/exceptions.py",
    "content": "__metaclass__ = type\n\n\nclass ApplicationError(Exception):\n    \"\"\"Base class for other exceptions\"\"\"\n\n    def __init__(self, default_message, additional_message=''):\n        self.default_message = default_message\n        self.additional_message = additional_message\n\n    def __str__(self):\n        return self.get_message()\n\n    def get_message(self):\n        return self.default_message if self.additional_message == '' else \"{}: {}\".format(self.default_message,\n                                                                                          self.additional_message)\n\n\nclass InvalidModelConfiguration(ApplicationError):\n    \"\"\"Raised when the model's configuration is corrupted\"\"\"\n\n    def __init__(self, additional_message=''):\n        super().__init__('Invalid model configuration', additional_message)\n\n\nclass InvalidInputData(ApplicationError):\n    \"\"\"Raised when the input data is corrupted\"\"\"\n\n    def __init__(self, additional_message=''):\n        super().__init__('Invalid input data', additional_message)\n\n\nclass InvalidUrlConfiguration(ApplicationError):\n    \"\"\"Raised when the model's configuration is corrupted\"\"\"\n\n    def __init__(self, additional_message=''):\n        super().__init__('Invalid url configuration', additional_message)"
  },
  {
    "path": "src/main/helpers.py",
    "content": "import io\nimport cv2\nimport json\nimport config\nimport requests\nimport jsonschema\nimport numpy as np\nfrom PIL import Image\nfrom APIClient import ApiClient\nfrom labels import labels_methods\nfrom exceptions import ApplicationError, InvalidModelConfiguration\n\n# master_dict = labels_methods()\nmaster_dict = config.master_dict\n\n\ndef get_user_models(configuration_path):\n    \"\"\" Returns a list of json objects that represent the sensitive info given by the user in\n    the configuration file\n    :param configuration_path: The user configuration path\n    :return: List of json objects\n    \"\"\"\n    user_models = []\n    with open(configuration_path) as f:\n        try:\n            data = json.load(f)\n        except Exception:\n            raise InvalidModelConfiguration(\"Json file corrupted\")\n    try:\n        validate_json_configuration(data)\n    except ApplicationError as e:\n        raise e\n    for info in data['sensitive_info']:\n        url = info.get('url')\n        models = {\n            'url': url,\n            'model_name': info['model_name'],\n            'label_name': info['class_name'],\n            'model_type': info['inference_type'].casefold(),\n            'technique': info['anonymization_technique'].casefold(),\n            'degree': info['anonymization_degree']\n        }\n        user_models.append(models)\n    return user_models\n\n\ndef parse_inference_response(inference_type, user_config, im, i, errors):\n    \"\"\" Either returns the response from the inference api or returns an array with the user's configuration file errors\n    :param inference_type: i.e detection\n    :param user_config: a json object that represent each sensitive info specified by the user\n    :param im: the image object that we need to anonymize\n    :param i: the index of the sensitive info that we are using\n    :param errors: a list that will be filled in case any error in the user's configuration file is present\n    :return: 
inference api response or the list of errors\n    \"\"\"\n    master_dict = config.master_dict\n    # getting all the supported labels\n    labels = list(master_dict.keys())\n    # checking if the user's requested label is present in the supported ones\n    if user_config[\"label_name\"] in labels:\n        # checking if the user's label is supported by the user's specified model type\n        if inference_type in master_dict[user_config[\"label_name\"]].keys():\n            # getting the urls and the models that supports the user's label and that are compatible with the user's specified model type at the same time\n            urls = list(\n                master_dict[user_config[\"label_name\"]][inference_type].keys())\n            models = list(\n                master_dict[user_config[\"label_name\"]][inference_type].values())\n            # we check each possible case that can occur:\n            # 1- the url is specified in the user's configuration file, this url is present in the matching urls, and the inference type is correct\n            if user_config[\"url\"] is not None and user_config[\"url\"] in urls and user_config[\n                \"model_type\"] == inference_type:\n                # if all the conditions above are true, we now check if the label is supported by the user's specified url and if the anonymization technique is applicable to this label\n                if user_config[\"model_name\"] in master_dict[user_config[\"label_name\"]][user_config[\"model_type\"]][\n                    user_config[\"url\"]] and user_config[\"technique\"] in master_dict[user_config[\"label_name\"]][\n                    \"technique\"]:\n                    # in this case, we send a request to the inference api to get the response\n                    response = getattr(ApiClient, \"get_\" + inference_type + \"_response\")(user_config['url'],\n                                                                                         user_config['model_name'], im)\n 
                   if inference_type == \"segmentation\":\n                        labels_list = ApiClient.get_labels(user_config['url'], user_config[\"model_name\"])\n                        #palette = ApiClient.get_palette(user_config['url'], user_config['model_name'])\n                        json_array = get_bbs(response, labels_list, user_config)\n                        return response, json_array\n                    else:\n                        return response\n                # here we are filling the errors list with all the errors that are present in the sensitive info and the index of this info in case one of the conditions above is false\n                elif user_config[\"model_name\"] not in master_dict[user_config[\"label_name\"]][user_config[\"model_type\"]][\n                    user_config[\"url\"]]:\n                    errors.append(\"The model <\" + user_config[\"model_name\"] + \"> in sensitive info <\" + str(\n                        i) + \"> is not available in the \" + inference_type + \" url : <\" + user_config[\n                                      \"url\"] + \"> for the label <\" + user_config[\"label_name\"] + \">.\")\n                elif user_config[\"technique\"] not in master_dict[user_config[\"label_name\"]][\"technique\"]:\n                    errors.append(\"The technique <\" + user_config[\"technique\"] + \"> is not supported for the label <\" +\n                                  user_config[\"label_name\"] + \"> in sensitive info <\" + str(i) + \">.\")\n            # 2- the url is specified in the user's configuration file, this url is not between the matching urls, and the inference type is correct\n            elif user_config[\"url\"] is not None and user_config[\"url\"] not in urls and user_config[\n                \"model_type\"] == inference_type:\n                errors.append(\"the url <\" + user_config[\"url\"] + \"> does not belong to the list of urls supported\")\n\n            # 3- the user hasn't specified any 
url and the inference type is correct\n            elif user_config[\"url\"] is None and user_config[\"model_type\"] == inference_type:\n                model_not_found = True\n                for val in models:\n                    if user_config[\"model_name\"] in val:\n                        inde = models.index(val)\n                        model_not_found = False\n\n                # we check if the model specified is between the matching models and if the technique specified is applicable to this label\n                if not model_not_found and user_config[\"model_name\"] in models[inde] and user_config[\"technique\"] in master_dict[user_config[\"label_name\"]][\"technique\"]:\n                    # we choose the first matching url\n                    response = getattr(ApiClient, \"get_\" + inference_type + \"_response\")(urls[inde],\n                                                                                         user_config['model_name'], im)\n                    if inference_type == \"segmentation\":\n                        labels_list = ApiClient.get_labels(urls[inde], user_config[\"model_name\"])\n                        #palette = ApiClient.get_palette(urls[inde], user_config['model_name'])\n                        json_array = get_bbs(response, labels_list, user_config)\n                        return response, json_array\n                    else:\n                        return response\n                # here we are filling the errors list with all the errors that are present in the sensitive info and the index of this info in case one of the conditions above is false\n                elif model_not_found:\n                    errors.append(\"The model <\" + user_config[\"model_name\"] + \"> in sensitive info <\" + str(\n                        i) + \"> is not available in the \" + inference_type + \" for label  <\" + user_config[\n                                      \"label_name\"] + \">\")\n                elif 
user_config[\"technique\"] not in master_dict[user_config[\"label_name\"]][\"technique\"]:\n                    errors.append(\"The technique <\" + user_config[\"technique\"] + \"> is not supported for the label <\" +\n                                  user_config[\"label_name\"] + \"> in sensitive info <\" + str(i) + \">.\")\n        else:\n            errors.append(\"The label <\" + user_config[\"label_name\"] + \"> in the sensitive info <\" + str(\n                i) + \"> is not supported by a \" + inference_type + \" api.\")\n    else:\n        errors.append(\"The label <\" + user_config[\"label_name\"] +\n                      \"> in the sensitive info <\" + str(i) + \"> is not supported.\")\n    return errors\n\n\ndef get_bbs(image, labels_list, user_config):\n    image = Image.open(io.BytesIO(image.content))\n    palette=image.getpalette()\n    label_id = labels_list.index(user_config['label_name'])\n    rgb_image = np.array(image.convert(mode=\"RGB\"))\n    bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n    response = []\n\n    lower = np.array([palette[(int(label_id) * 3) + 2], palette[(int(label_id) * 3) + 1], palette[(int(label_id) * 3)]])\n    mask1 = cv2.inRange(bgr_image, lower, lower)\n    contours, _ = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    boxes = []\n    for c in contours:\n        (x, y, w, h) = cv2.boundingRect(c)\n        if (len(np.array(c).flatten()) > 3):\n            boxes.append([x, y, x + w, y + h])\n    for b in boxes:\n        response.append({\"ObjectClassId\": label_id, \"class_name\": user_config[\"label_name\"], \"bbox\": b})\n    return response\n\n\ndef parse_detection_response(user_config, im, i, errors):\n    \"\"\" Parse the object detection api response and returns a list of json objects\n    representing the parsed response in addition to some useful user sensitive info\n    :param user_config: a json object that represent each sensitive info specified by the user\n    :param im: the 
image we need to anonymize\n    :param i: index of the sensitive info\n    :param errors: list of errors that will be filled in case any error is the user's configuration file is present\n    :return: List of json objects\n    \"\"\"\n    boxes = []\n    bounding_boxes = []\n    response = parse_inference_response(\"detection\", user_config, im, i, errors)\n    if not errors:\n        for bbox in response['data']['bounding-boxes']:\n            if bbox['ObjectClassName'] == user_config['label_name']:\n                bounds = [bbox['coordinates']['left'],\n                          bbox['coordinates']['top'],\n                          bbox['coordinates']['right'],\n                          bbox['coordinates']['bottom']]\n                boxes.append(bounds)\n                bounding_boxes.append({'type': user_config['model_type'],\n                                       'technique': user_config['technique'],\n                                       'boxes': boxes,\n                                       'degree': user_config['degree'],\n                                       'label_id': None})\n        return bounding_boxes, None\n    else:\n        return response, None\n\n\ndef parse_segmentation_response(user_config, im, i, errors):\n    \"\"\" Parse the semantic segmentation api response and returns a list of json objects\n    representing the parsed response in addition to some useful user sensitive info\n    :param user_config: a json object that represent each sensitive info specified by the user\n    :param im: the image we need to anonymize\n    :param i: index of the sensitive info\n    :param errors: list of errors that will be filled in case any error is the user's configuration file is present\n    :return: List of json objects\n    \"\"\"\n    bounding_boxes = []\n    r = parse_inference_response(\"segmentation\", user_config, im, i, errors)\n    try:\n        mask, response = r\n    except Exception as e:\n        response = r\n    if not errors:\n  
      for data in response:\n            if data['class_name'] == user_config['label_name']:\n                result = {\n                    'boxes': data['bbox'],\n                    'type': user_config['model_type'],\n                    'technique': user_config['technique'],\n                    'degree': user_config['degree'],\n                    'label_id': data['ObjectClassId']\n                }\n                bounding_boxes.append(result)\n        return bounding_boxes, mask\n    else:\n        return response, None\n\n\ndef validate_json_configuration(data):\n    \"\"\"Validate the user configuration file by comparing it to the ConfigurationSchema\n    :param data: The data from the user configuration file\n    \"\"\"\n    with open('ConfigurationSchema.json') as f:\n        schema = json.load(f)\n    try:\n        jsonschema.validate(data, schema)\n    except Exception as e:\n        raise InvalidModelConfiguration(e)\n\n\ndef check_api_availability(url: str):\n    try:\n        response = requests.get(url + \"models\")\n    except Exception:\n        raise Exception(\"wrong url format. expected format: http://ip:port/\")\n    return response\n\n\ndef parse_json(json_path):\n    with open(json_path, 'r') as f:\n        try:\n            payload = json.load(f)\n        except Exception:\n            raise Exception(\"Json file corrupted\")\n    return payload\n\n\ndef write_json(payload, json_path):\n    with open(json_path, 'w') as outfile:\n        json.dump(payload, outfile)\n"
  },
  {
    "path": "src/main/labels.py",
    "content": "import os\nimport sys\nimport json\nfrom APIClient import ApiClient\n\nsys.path.append(\"supported_methods\")\n\n\ndef labels_methods():\n    \"\"\"\n    Returns a list of the available labels with their available anonymization methods, the urls\n    that supports them, the type of each url and the model names\n    :return: List of json objects\n    \"\"\"\n    api_client = ApiClient()\n    labels = []\n    master_dict = {}\n    types = []\n    urls = []\n    for model in api_client.configuration:\n        types.append(model['type'])\n        urls.append(model['url'])\n\n        for label in model[\"labels\"]:\n            labels.append(label)\n\n    master_dict = master_dict.fromkeys(labels)\n\n    for label in labels:\n        url_type = {}\n        for type in types:\n            #            models = []\n            for url in urls:\n                models = []\n                for model in api_client.configuration:\n                    if model[\"type\"] == type and model[\"url\"] == url and label in model[\"labels\"]:\n                        models.append(model['name'])\n                        if model['type'] not in url_type.keys():\n                            url_type[model['type']] = {}\n                        url_type[model['type']][model['url']] = models\n                        master_dict[label] = url_type\n\n    special_labels = []\n    for file in os.listdir(os.path.join(os.getcwd(), \"supported_methods\")):\n        if file != 'common_labels.py' and file != '__init__.py' and file != '__pycache__':\n            special_label = file.split(\".\")[0]\n            special_labels.append(special_label)\n\n    for user_label in labels:\n        if user_label in special_labels:\n            class_name = user_label.title()\n            x = getattr(__import__(user_label), class_name)()\n            attr = getattr(x, \"get_labels\")(class_name)\n            master_dict[user_label]['technique'] = attr\n\n        else:\n            class_name 
= user_label.title()\n            x = getattr(__import__('common_labels'), 'CommonLabels')()\n            attr = getattr(x, \"get_labels\")(class_name)\n            master_dict[user_label]['technique'] = attr\n    return master_dict\n\n\n'''\n\nOld version\nimport os\nimport sys\nimport json\nfrom APIClient import ApiClient\n\nsys.path.append(\"supported_methods\")\n\n\ndef labels_methods():\n    \"\"\"\n    Returns a list of the available labels with their available anonymization methods, the urls\n    that supports them, the type of each url and the model names\n    :return: List of json objects\n    \"\"\"\n    api_client = ApiClient()\n    labels = []\n    master_dict = {}\n    types = []\n    for model in api_client.configuration:\n        types.append(model['type'])\n        for label in model[\"labels\"]:\n            labels.append(label)\n\n    master_dict = master_dict.fromkeys(labels)\n\n    for label in labels:\n        url_type = {}\n        for type in types:\n            models = []\n            for model in api_client.configuration:\n                if model[\"type\"] == type and label in model[\"labels\"]:\n                    models.append(model['name'])\n                    url_type[model['type']] = {model['url']: models}\n                    master_dict[label] = url_type\n\n    special_labels = []\n    for file in os.listdir(os.path.join(os.getcwd(), \"supported_methods\")):\n        if file != 'common_labels.py' and file != '__init__.py' and file != '__pycache__':\n            special_label = file.split(\".\")[0]\n            special_labels.append(special_label)\n\n    for user_label in labels:\n        if user_label in special_labels:\n            class_name = user_label.title()\n            x = getattr(__import__(user_label), class_name)()\n            attr = getattr(x, \"get_labels\")(class_name)\n            master_dict[user_label]['technique'] = attr\n\n        else:\n            class_name = user_label.title()\n            x = 
getattr(__import__('common_labels'), 'CommonLabels')()\n            attr = getattr(x, \"get_labels\")(class_name)\n            master_dict[user_label]['technique'] = attr\n    return master_dict\n\n'''\n"
  },
  {
    "path": "src/main/models.py",
    "content": "class ApiResponse:\n\n    def __init__(self, success=True, data=None, error=None):\n        \"\"\"\n        Defines the response shape\n        :param success: A boolean that returns if the request has succeeded or not\n        :param data: The model's response\n        :param error: The error in case an exception was raised\n        \"\"\"\n        self.data = data\n        self.error = error.__str__() if error is not None else ''\n        self.success = success\n"
  },
  {
    "path": "src/main/start.py",
    "content": "import io\nimport cv2\nfrom models import ApiResponse\nfrom labels import labels_methods\nfrom exceptions import ApplicationError\nfrom fastapi import UploadFile, File, FastAPI, Form\nfrom fastapi.responses import StreamingResponse\nfrom anonymization_service import AnonymizationService\nimport helpers\nimport config\n\napp = FastAPI(version='2.0', title='BMW Anonymization API',\n              description=\"<h3><b>API that localizes and obfuscates sensitive information in images/videos in order to preserve the individuals anonymity.</b></h3>\"\n\t\t\t\t\t\t  \"<b>Developers:</b></br>\"\n                          \"<b>Ghenwa Aoun</b>\"\n                          \"</br> \"\n\t\t\t\t\t\t  \"<b>Antoine Charbel</b></br>\"\n                          \"<b>Jimmy Tekli</b>\"\n                          \"</br>\"\n                          \"<br><b>Contact us:</b></br>\"\n\t\t\t\t\t\t  \"<b>BMW Innovation Lab: <a href='mailto:innovation-lab@bmw.de'>innovation-lab@bmw.de</a></b>\")\nanonymizationservice = AnonymizationService()\nconfig.master_dict = labels_methods()\n\nurl_config_path = \"../jsonFiles/url_configuration.json\"\n\n@app.get('/list_urls', tags=[\"Configuration\"])\ndef list_urls():\n    \"\"\"\n    list all available urls in the url json file\n    :return: ApiResponse\n    \"\"\"\n    try:\n        payload = helpers.parse_json(url_config_path)\n    except Exception as e:\n        return ApiResponse(success=False, error=e)\n    return ApiResponse(data=payload)\n\n\n@app.post('/set_url', tags=[\"Configuration\"])\ndef set_url(url: str = Form(...)):\n    \"\"\"\n    Add url to the url json file\n    :param url: api url in the format: http://ip:port/\n    :return: ApiResponse\n    \"\"\"\n    try:\n        payload = helpers.parse_json(url_config_path)\n        response = helpers.check_api_availability(url)\n    except Exception as e:\n        return ApiResponse(success=False, error=e)\n    if response.status_code == 200:\n        if url not in 
payload['urls']:\n            payload['urls'].append(url)\n            helpers.write_json(payload, url_config_path)\n            data = \"url added successfully\"\n        else:\n            data = \"url already exist\"\n        return ApiResponse(data=data)\n    else:\n        return ApiResponse(success=False, error=\"url trying to add is not reachable\")\n\n\n@app.post('/remove_url', tags=[\"Configuration\"])\ndef remove_url(url: str = Form(...)):\n    \"\"\"\n        Remove url from the url json file\n        :param url: api url in the format: http://ip:port/\n        :return: ApiResponse\n        \"\"\"\n    try:\n        payload = helpers.parse_json(url_config_path)\n    except Exception as e:\n        return ApiResponse(success=False, error=e)\n    if url in payload['urls']:\n        payload['urls'].remove(url)\n        helpers.write_json(payload, url_config_path)\n        return ApiResponse(data={\"url removed successfully\"})\n    else:\n        return ApiResponse(success=False, error=\"url is not present in config file\")\n\n\n@app.post('/remove_all_urls', tags=[\"Configuration\"])\ndef remove_all_urls():\n    \"\"\"\n    Remove all available urls in the url json file\n    :return: ApiResponse\n    \"\"\"\n    payload = {\"urls\": []}\n    helpers.write_json(payload, url_config_path)\n    return ApiResponse(data=\"all urls removed successfully\")\n\n\n@app.get('/available_methods/', tags=[\"Configuration\"])\ndef get_available_methods():\n    \"\"\"\n    :return: A list that shows the model name, the urls and the model types that support each label in addition to the anonymization techniques that can be applied to each of them\n    \"\"\"\n    try:\n        config.master_dict = labels_methods()\n        return config.master_dict\n    except Exception:\n        return ApiResponse(success=False, error='unexpected server error')\n\n\n@app.post('/anonymize/', tags=[\"Anonymization\"])\ndef anonymize(image: UploadFile = File(...), configuration: UploadFile = 
File(...)):\n    \"\"\"\n    Anonymize the given image\n    :param image: Image file\n    :param configuration: Json file\n    :return: The anonymized image\n    \"\"\"\n    try:\n        result, errors = anonymizationservice.anonymize(image, configuration)\n        if not errors:\n            _, im_png = cv2.imencode(\".png\", result)\n            response = StreamingResponse(io.BytesIO(im_png.tobytes()), media_type=\"image/jpeg\")\n            return response\n        else:\n            return ApiResponse(success=False,\n                               error=\"Some data in your configuration file need to be modified. Check the /available_methods/ endpoint\",\n                               data=errors)\n    except ApplicationError as e:\n        return ApiResponse(success=False, error=e)\n    except Exception:\n        return ApiResponse(success=False, error='unexpected server error')\n\n\n@app.post('/anonymize_video/', tags=[\"Anonymization\"])\ndef anonymize_video(video: UploadFile = File(...), configuration: UploadFile = File(...)):\n    \"\"\"\n    Anonymize the given video  and save it to src/main/anonymized_video as original_video_name_TIMESTAMP.mp4\n    :param video: Video file\n    :param configuration: Json file\n    \"\"\"\n    try:\n        return anonymizationservice.anonymize_video(video, configuration)\n    except ApplicationError as e:\n        return ApiResponse(success=False, error=e)\n    except Exception:\n        return ApiResponse(success=False, error='unexpected server error')\n"
  },
  {
    "path": "src/main/strategy_context.py",
    "content": "from anonymization.base_anonymization import BaseAnonymization\n\n\nclass StrategyContext:\n    def __init__(self):\n        pass\n\n    def anonymize(self, detection_type: BaseAnonymization, technique: str, image, response, degree,label_id, mask):\n        \"\"\"\n        :param detection_type: Either it is semantic segmentation or object detection\n        :param technique: The anonymization method\n        :param image: Input image\n        :param response: The bounding boxes taken from the output of the inference api\n        :param degree: The degree used to specify the opacity of the anonymization\n        :param label_id: The id of the detected class\n        :param mask: The mask used to apply the anonymzation\n        :return:\n        \"\"\"\n        return getattr(detection_type, technique)(image, response, degree, label_id, mask)\n"
  },
  {
    "path": "src/main/supported_methods/__init__.py",
    "content": ""
  },
  {
    "path": "src/main/supported_methods/common_labels.py",
    "content": "class CommonLabels:\n    def __init__(self):\n        self.blackening = None\n        self.pixelating = None\n        self.blurring = None\n\n    def get_labels(self, label_name):\n        methods = []\n        for key, value in self.__dict__.items():\n            methods.append(key)\n        return methods\n"
  },
  {
    "path": "src/main/urlConfigurationSchema",
    "content": "{\n    \"type\": \"object\",\n    \"required\": [\n        \"urls\"\n    ],\n    \"properties\": {\n        \"urls\": {\n            \"type\": \"array\",\n            \"items\": {\n                \"type\": \"string\"\n            }\n        }\n    }\n}"
  },
  {
    "path": "testing_script/test.py",
    "content": "import os\nimport sys\nimport time\nimport requests\n\nstart_time = time.time()\n\nurl = \"http://ip:port/anonymize/\"\nj = 0\nlist_of_images = os.listdir(\"large\")\n\nfor i in list_of_images:\n    j = j + 1\n    files = {\"image\": open(\"large/\" + i, 'rb'),\n             \"configuration\": open('user_configuration.json', 'rb')}\n    response = requests.post(url, files=files)\n    print(\"Total time: \" + str(time.time() - start_time))\n    with open(\"results/anonymized_\" + i, 'wb') as f:\n        f.write(response.content)\n    if response.status_code != 200:\n        print(time.time() - start_time)\n        sys.exit(\"error\")\n"
  },
  {
    "path": "testing_script/user_configuration.json",
    "content": "{\n  \"sensitive_info\": [\n    {\n      \"model_name\": \"yolo\",\n      \"class_name\": \"person\",\n      \"anonymization_technique\": \"blackening\",\n      \"inference_type\": \"detection\",\n      \"anonymization_degree\": 1\n    }\n  ]\n}\n"
  },
  {
    "path": "url_for_openvino_compose/url_configuration.json",
    "content": "{\"urls\": [\"http://openvino_detection_api:80/\", \"http://openvino_segmentation_api:80/\"]}\n"
  }
]