[
  {
    "path": ".gitignore",
    "content": "# Project particular ignores\nbackup/\nvideos/inputs/*\nvideos/outputs/*\nimages*/\n*.mp4\n.DS_Store\n.vscode/\n\n# Intermediate files to convert MobileNetV2 from TF->TF-TRT\nutils/*.png\nmobilenetv2/inference_graph_*/\nmobilenetv2/converted_trt_*.pb\n\n# Avoid weights, or engines\n*.weights\n*.trt\n*.engine\n*.onnx\n\n# Environment files (not templates)\nserver/*.env\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n*.o\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n"
  },
  {
    "path": "Dockerfile",
    "content": "# Installs maskcam on a BalenaOS container (devkit or Photon)\nFROM balenalib/jetson-nano-ubuntu:20210201\n\n# Don't prompt with any configuration questions\nENV DEBIAN_FRONTEND noninteractive\n\n# Switch the nvidia apt source repos and\n# install some utilities\n\nRUN \\\n    apt-get update && apt-get install -y \\\n    lbzip2 wget tar python3 git\n\nENV UDEV=1\n\n# Download and install BSP binaries for L4T 32.4.2\n# This is mostly from Balena's Alan Boris at:\n# https://github.com/balena-io-playground/jetson-nano-sample-new/blob/master/CUDA/Dockerfile\n\nRUN apt-get update && apt-get install -y wget tar python3 libegl1 && \\\n    wget https://developer.nvidia.com/embedded/L4T/r32_Release_v4.2/t210ref_release_aarch64/Tegra210_Linux_R32.4.2_aarch64.tbz2 && \\\n    tar xf Tegra210_Linux_R32.4.2_aarch64.tbz2 && \\\n    cd Linux_for_Tegra && \\\n    sed -i 's/config.tbz2\\\"/config.tbz2\\\" --exclude=etc\\/hosts --exclude=etc\\/hostname/g' apply_binaries.sh && \\\n    sed -i 's/install --owner=root --group=root \\\"${QEMU_BIN}\\\" \\\"${L4T_ROOTFS_DIR}\\/usr\\/bin\\/\\\"/#install --owner=root --group=root \\\"${QEMU_BIN}\\\" \\\"${L4T_ROOTFS_DIR}\\/usr\\/bin\\/\\\"/g' nv_tegra/nv-apply-debs.sh && \\\n    sed -i 's/LC_ALL=C chroot . mount -t proc none \\/proc/ /g' nv_tegra/nv-apply-debs.sh && \\\n    sed -i 's/umount ${L4T_ROOTFS_DIR}\\/proc/ /g' nv_tegra/nv-apply-debs.sh && \\\n    sed -i 's/chroot . \\//  /g' nv_tegra/nv-apply-debs.sh && \\\n    ./apply_binaries.sh -r / --target-overlay && cd .. && \\\n    rm -rf Tegra210_Linux_R32.4.2_aarch64.tbz2 && \\\n    rm -rf Linux_for_Tegra && \\\n    echo \"/usr/lib/aarch64-linux-gnu/tegra\" > /etc/ld.so.conf.d/nvidia-tegra.conf && \\\n    echo \"/usr/lib/aarch64-linux-gnu/tegra-egl\" > /etc/ld.so.conf.d/nvidia-tegra-egl.conf && ldconfig\n\n# Install GStreamer and remove unnecessary files\nRUN apt-get install -y \\\n    libssl1.0.0 \\\n    libgstreamer1.0-0 \\\n    gstreamer1.0-tools \\\n    gstreamer1.0-plugins-good \\\n    gstreamer1.0-plugins-bad \\\n    gstreamer1.0-plugins-ugly \\\n    gstreamer1.0-libav \\\n    libgstrtspserver-1.0-0 \\\n    libjansson4=2.11-1 \\\n    cuda-toolkit-10-2 && \\\n    ldconfig\nRUN \\\n  rm -rf /usr/src/nvidia/graphics_demos \\\n     /usr/local/cuda-10.2/samples \\\n     /usr/local/cuda-10.2/doc \n\n# Install DeepStream\nRUN apt-get install -y deepstream-5.0 && \\\n  rm -rf /opt/nvidia/deepstream/deepstream-5.0/samples \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_cnn_infer_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libnvinfer_static.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_adv_infer_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libcublas_static.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_adv_train_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_ops_infer_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libcublasLt_static.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_cnn_train_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libcudnn_ops_train_static_v8.a \\\n     /usr/lib/aarch64-linux-gnu/libmyelin_compiler_static.a \\\n     /usr/lib/aarch64-linux-gnu/libmyelin_executor_static.a \\\n     /usr/lib/aarch64-linux-gnu/libnvinfer_plugin_static.a && \\\n     ldconfig\n\n# Install system-level python3 packages\nRUN apt-get update && apt-get install -y \\\n  gir1.2-gst-rtsp-server-1.0 \\\n  python3-pip \\\n  python3-opencv \\\n  python3-libnvinfer \\\n  python3-scipy \\\n  cython3 \\\n  python3-sklearn \\\n  python-gi-dev \\\n 
 unzip && ldconfig\n\n# These system-level packages don't provide egg-info files; add them manually so that pip knows\nCOPY docker/opencv_python-3.2.0.egg-info /usr/lib/python3/dist-packages/\nCOPY docker/scikit-learn-0.19.1.egg-info /usr/lib/python3/dist-packages/\n\n# Install gst-python (python bindings for GStreamer)\nRUN \\\n   export GST_CFLAGS=\"-pthread -I/usr/include/gstreamer-1.0 -I/usr/include/glib-2.0 -I/usr/lib/x86_64-linux-gnu/glib-2.0/include\" && \\\n   export GST_LIBS=\"-lgstreamer-1.0 -lgobject-2.0 -lglib-2.0\" && \\\n   git clone https://github.com/GStreamer/gst-python.git && \\\n   cd gst-python && git checkout 1a8f48a && \\\n   ./autogen.sh PYTHON=python3 && \\\n   ./configure PYTHON=python3 && \\\n   make && make install\n\n# Install pyds (python bindings for DeepStream)\nRUN cd /opt/nvidia/deepstream/deepstream-5.0/lib && python3 setup.py install\n\n# Upgrade pip here to avoid re-running on code changes\nRUN pip3 install --upgrade pip\n\n# ---- Below steps are run before copying full maskcam code to allow layer caching ----\n\n# Compile YOLOv4 plugin for DeepStream\nCOPY deepstream_plugin_yolov4 /deepstream_plugin_yolov4\nENV CUDA_VER=10.2\nRUN cd /deepstream_plugin_yolov4 && make\n\n# Get TensorRT engine (pretrained YOLOv4-tiny)\n# Model trained on smaller dataset\n# RUN wget -P / https://maskcam.s3.us-east-2.amazonaws.com/facemask_y4tiny_1024_608_fp16.trt\n\n# Model trained on bigger dataset, merged with MAFA, WiderFace, Kaggle Medical Masks and FDDB\nRUN wget -P / https://maskcam.s3.us-east-2.amazonaws.com/maskcam_y4t_1024_608_fp16.trt\n# RUN wget -P / https://maskcam.s3.us-east-2.amazonaws.com/maskcam_y4t_1120_640_fp16.trt\n\n# Install requirements with pinned versions\nCOPY requirements.txt /maskcam_requirements.txt\nRUN pip3 install -r /maskcam_requirements.txt\n\n# ---- Note: all layers below this will be re-generated each time code changes ----\n# Copy full maskcam code\nCOPY . /opt/maskcam_1.0/\nWORKDIR /opt/maskcam_1.0\n\n# Move pre-copied files to their maskcam location\n# NOTE: Ignoring errors with `exit 0` to avoid breaking on balena livepush\nRUN rm -r deepstream_plugin_yolov4 && mv /deepstream_plugin_yolov4 . ; exit 0\nRUN mv /*.trt yolo/ ; exit 0\n\n# Preload library to avoid Gst errors \"cannot allocate memory in static TLS block\"\nENV LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n\n# Un-pinned versions of maskcam requirements (comment pip3 install above before this)\n# RUN pip3 install -r requirements.in -c docker/constraints.docker\n\nRUN chmod +x docker/start.sh\nRUN chmod +x maskcam_run.py\nCMD [\"docker/start.sh\"]\n"
  },
  {
    "path": "LICENSE.md",
    "content": "MIT License\nCopyright (c) 2020, 2021 Berkeley Design Technology, Inc.. All rights reserved.\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "README.md",
    "content": "# MaskCam <!-- omit in toc -->\n\n<p align=\"center\">\n  <img src=\"/docs/imgs/MaskCam-Demo1.gif\">\n</p>\n\nMaskCam is a prototype reference design for a Jetson Nano-based smart camera system that measures crowd face mask usage in real-time, with all AI computation performed at the edge. MaskCam detects and tracks people in its field of view and determines whether they are wearing a mask via an object detection, tracking, and voting algorithm. It uploads statistics (not videos) to the cloud, where a web GUI can be used to monitor face mask compliance in the field of view. It saves interesting video snippets to local disk (e.g., a sudden influx of lots of people not wearing masks) and can optionally stream video via RTSP.\n\nMaskCam can be run on a Jetson Nano Developer Kit, or on a Jetson Nano module (SOM) with the ConnectTech Photon carrier board. It was designed to use the Raspberry Pi High Quality Camera but will also work with pretty much any USB webcam that is supported on Linux.\n\nThe on-device software stack is mostly written in Python and runs under JetPack 4.4.1 or 4.5. Edge AI processing is handled by NVIDIA’s DeepStream video analytics framework, YOLOv4-tiny, and Tryolabs' [Norfair](https://github.com/tryolabs/norfair) tracker.  MaskCam reports statistics to and receives commands from the cloud using MQTT and a web-based GUI. The software is containerized and for evaluation can be easily installed on a Jetson Nano DevKit using docker with just a couple of commands. For production, MaskCam can run under balenaOS, which makes it easy to manage and deploy multiple devices.\n\nWe urge you to try it out! It’s easy to install on a Jetson Nano Developer Kit and requires only a web cam. (The cloud-based statistics server and web GUI are optional, but are also dockerized and easy to install on any reasonable Linux system.)  [See below for installation instructions.](#running-maskcam-from-a-container-on-a-jetson-nano-developer-kit)\n\nMaskCam was developed by Berkeley Design Technology, Inc. (BDTI) and Tryolabs S.A., with development funded by NVIDIA. MaskCam is offered under the MIT License. For more information about MaskCam, please see the [report from BDTI](https://www.bdti.com/maskcam). If you have questions, please email us at maskcam@bdti.com. 
\n\n## Table of contents <!-- omit in toc -->\n- [Start Here!](#start-here)\n  - [Running MaskCam from a Container on a Jetson Nano Developer Kit](#running-maskcam-from-a-container-on-a-jetson-nano-developer-kit)\n  - [Viewing the Live Video Stream](#viewing-the-live-video-stream)\n  - [Setting Device Configuration Parameters](#setting-device-configuration-parameters)\n- [MQTT and Web Server Setup](#mqtt-and-web-server-setup)\n  - [Running the MQTT Broker and Web Server](#running-the-mqtt-broker-and-web-server)\n  - [Setup a Device with Your Server](#setup-a-device-with-your-server)\n  - [Checking MQTT Connection](#checking-mqtt-connection)\n- [Working With the MaskCam Container](#working-with-the-maskcam-container)\n  - [Development Mode: Manually Running MaskCam](#development-mode-manually-running-maskcam)\n  - [Debugging: Running MaskCam Modules as Standalone Processes](#debugging-running-maskcam-modules-as-standalone-processes)\n- [Additional Information](#additional-information)\n  - [Running on Jetson Nano Developer Kit Using BalenaOS](#running-on-jetson-nano-developer-kit-using-balenaos)\n  - [Custom Container Development](#custom-container-development)\n    - [Building From Source on Jetson Nano Developer Kit](#building-from-source-on-jetson-nano-developer-kit)\n    - [Using Your Own Detection Model](#using-your-own-detection-model)\n  - [Installing MaskCam Manually (Without a Container)](#installing-maskcam-manually-without-a-container)\n  - [Running on Jetson Nano with Photon Carrier Board](#running-on-jetson-nano-with-photon-carrier-board)\n  - [Useful Development Scripts](#useful-development-scripts)\n- [Troubleshooting Common Errors](#troubleshooting-common-errors)\n\n\n## Start Here!\n### Running MaskCam from a Container on a Jetson Nano Developer Kit\nThe easiest and fastest way to get MaskCam running on your Jetson Nano Dev Kit is using our pre-built containers.  You will need:\n\n1. A Jetson Nano Dev Kit running JetPack 4.4.1 or 4.5\n2. An external DC 5 volt, 4 amp power supply connected through the Dev Kit's barrel jack connector (J25). (See [these instructions](https://www.jetsonhacks.com/2019/04/10/jetson-nano-use-more-power/) on how to enable barrel jack power.) This software makes full use of the GPU, so it will not run with USB power.\n3. A USB webcam attached to your Nano\n4. Another computer with a program that can display RTSP streams -- we suggest [VLC](https://www.videolan.org/vlc/index.html) or [QuickTime](https://www.apple.com/quicktime/download/).\n\nFirst, the MaskCam container needs to be downloaded from Docker Hub. On your Nano, run:\n```\n# This will take 10 minutes or more to download\nsudo docker pull maskcam/maskcam-beta\n```\n\nFind your local Jetson Nano IP address using `ifconfig`. This address will be used later to view a live video stream from the camera and to interact with the Nano from a web server.\n\nMake sure a USB camera is connected to the Nano, and then start MaskCam by running the following command. Make sure to substitute `<your-jetson-ip>` with your Nano's IP address.\n```\n# Connect USB camera before running this!\nsudo docker run --runtime nvidia --privileged --rm -it --env MASKCAM_DEVICE_ADDRESS=<your-jetson-ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\nThe MaskCam container should start running the `maskcam_run.py` script, using the USB camera as the default input device (`/dev/video0`). It will produce various status output messages (and error messages, if it encounters problems). If there are errors, the process will automatically end after several seconds. Check the [Troubleshooting](#troubleshooting-common-errors) section for tips on resolving errors.\n\nOtherwise, after 30 seconds or so, it should continually generate status messages (such as `Processed 100 frames...`). Leave it running (don't press `Ctrl+C`, but be aware that the device will start heating up) and continue to the next section to visualize the video!\n\n
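If you want to double-check that the container is up, you can open a second terminal on the Nano and list the running containers; the `IMAGE` column should show `maskcam/maskcam-beta`:\n\n```\n# From a second terminal on the Nano\nsudo docker ps\n```\n\n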
### Viewing the Live Video Stream\nIf you scroll through the logs and don't see any errors, you should find a message like:\n\n```Streaming at rtsp://aaa.bbb.ccc.ddd:8554/maskcam```\n\nwhere `aaa.bbb.ccc.ddd` is the address that you provided in `MASKCAM_DEVICE_ADDRESS` previously. If you didn't provide an address, you'll see a placeholder address there, but the streaming will still work.\n\nYou can copy-paste that URL into your RTSP streaming viewer ([see how](https://user-images.githubusercontent.com/12506292/111346333-e14d8800-865c-11eb-9242-0ffa4f50547f.mp4) to do it with VLC) on another computer. If all goes well, you should be rewarded with streaming video from your Nano, with green boxes around faces wearing masks and red boxes around faces not wearing masks. An example video of the live streaming in action is shown below.\n\n<p align=\"center\">\n  <img src=\"/docs/imgs/MaskCam-Live1.gif\">\n</p>\n\nThis video stream gives a general demonstration of how MaskCam works. However, MaskCam also has other features, such as the ability to send mask detection statistics to the cloud and view them through a web browser. If you'd like to see these features in action, you'll need to set up an MQTT server, which is covered in the [MQTT and Web Server Setup section](#mqtt-and-web-server-setup).\n\nIf you encounter any errors running the live stream, check the [Troubleshooting](#troubleshooting-common-errors) section for tips on resolving errors.\n\n### Setting Device Configuration Parameters\nMaskCam uses environment variables to configure parameters without having to rebuild the container or manually change the configuration file each time the program is run. For example, in the previous section we set the `MASKCAM_DEVICE_ADDRESS` variable to indicate our Nano's IP address. A list of configurable parameters is shown in [maskcam_config.txt](maskcam_config.txt). The mapping between environment variable names and configuration parameters is defined in [maskcam/config.py](maskcam/config.py).\n\nThis section shows how to set environment variables to change configuration parameters. For example, if you want to use the `/dev/video1` camera device rather than `/dev/video0`, you can define `MASKCAM_INPUT` when running the container:\n\n```\n# Run with MASKCAM_INPUT and MASKCAM_DEVICE_ADDRESS\nsudo docker run --runtime nvidia --privileged --rm -it --env MASKCAM_INPUT=v4l2:///dev/video1 --env MASKCAM_DEVICE_ADDRESS=<your-jetson-ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\nAnother useful input device is a CSI camera (like the Raspberry Pi camera); in that case, you need to set `MASKCAM_INPUT=argus://0` instead of the value shown above.\n\n
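For instance, here is the same run command as above with only the input changed to the CSI camera:\n\n```\n# Run with a CSI camera (e.g., Raspberry Pi camera) as input\nsudo docker run --runtime nvidia --privileged --rm -it --env MASKCAM_INPUT=argus://0 --env MASKCAM_DEVICE_ADDRESS=<your-jetson-ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\n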
As another example, if you have already set up the MQTT and web server (as shown in the [MQTT and Web Server Setup section](#mqtt-and-web-server-setup)), you need to define two additional environment variables, `MQTT_BROKER_IP` and `MQTT_DEVICE_NAME`. This allows your device to find the MQTT server and identify itself:\n\n```\n# Run with MQTT_BROKER_IP, MQTT_DEVICE_NAME, and MASKCAM_DEVICE_ADDRESS\nsudo docker run --runtime nvidia --privileged --rm -it --env MQTT_BROKER_IP=<server IP> --env MQTT_DEVICE_NAME=<a-unique-string-you-like> --env MASKCAM_DEVICE_ADDRESS=<your-jetson-ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\n*If you have too many `--env` variables to add, it might be easier to create a [.env file](https://docs.docker.com/compose/env-file/) and point to it using the `--env-file` flag instead.*\n\n\n## MQTT and Web Server Setup\n### Running the MQTT Broker and Web Server\nMaskCam is intended to be set up with a web server that stores mask detection statistics and allows users to remotely interact with the device. We wrote code for instantiating a [server](server/) that receives statistics from the device, stores them in a database, and has a web-based GUI frontend to display them. A screenshot of the frontend for an example device is shown below.\n\n<p align=\"center\">\n  <img src=\"/docs/imgs/maskcam-frontend.PNG\">\n</p>\n\nYou can test out and explore this functionality by starting the server on a PC on your local network and pointing your Jetson Nano MaskCam device to it. This section gives instructions on how to do so. The MQTT broker and web server can be built and run on a Linux or OSX machine; we've tested it on Ubuntu 18.04 LTS and OSX Big Sur. It can also be set up in an AWS EC2 instance if you want to access it from outside of your local network.\n\nThe server consists of several docker containers that run together using [docker-compose](https://docs.docker.com/compose/install/). Install docker-compose on your machine by following the [installation instructions for your platform](https://docs.docker.com/compose/install/) before continuing. All other necessary packages and libraries will be automatically installed when you set up the containers in the next steps.\n\nAfter installing docker-compose, clone this repo:\n```\ngit clone https://github.com/bdtinc/maskcam.git\n```\n\nGo to the `server/` folder, which has all the needed components implemented in four containers: the Mosquitto broker, backend API, database, and Streamlit frontend.\n\nThese containers are configured using environment variables, so create the `.env` files by copying the default templates:\n```\ncd server\ncp database.env.template database.env\ncp frontend.env.template frontend.env\ncp backend.env.template backend.env\n```\n\nThe only file that needs to be changed is `database.env`. Open it with a text editor and replace the `<DATABASE_USER>`, `<DATABASE_PASSWORD>`, and `<DATABASE_NAME>` fields with your own values. Here are some example values, but for security reasons you should choose your own:\n```\nPOSTGRES_USER=postgres\nPOSTGRES_PASSWORD=some_password\nPOSTGRES_DB=maskcam\n```\n\n*NOTE:* If you want to change any of the `database.env` values after building the containers, the easiest thing to do is to delete the `pgdata` volume by running `docker volume rm pgdata`. Note that this also deletes all stored database information and statistics.\n\nAfter editing the database environment file, you're ready to build all the containers and run them with a single command:\n\n```\nsudo docker-compose up -d\n```\n\nWait a couple of minutes after issuing the command to make sure that all containers are built and running. Then, check the local IP of your computer by running the `ifconfig` command (it should be an address that starts with `192.168...`, `10...` or `172...`). This is the server IP that will be used for connecting to the server (since the server is hosted on this computer).\n\n
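To confirm that all four containers are up, you can list them and their states with docker-compose:\n\n```\nsudo docker-compose ps\n```\n\n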
Next, open a web browser and enter the server IP to visit the frontend webpage:\n```\nhttp://<server IP>:8501/\n```\nIf you see a `ConnectionError` in the frontend, wait a couple more seconds and reload the page. The backend container can take some time to finish the database setup.\n\n*NOTE:* If you're setting the server up on a remote instance like an AWS EC2, make sure you have ports `1883` (MQTT) and `8501` (web frontend) open for inbound and outbound traffic.\n\n\n### Setup a Device With Your Server\nOnce you've got the server set up on a local machine (or in an AWS EC2 instance with a public IP), switch back to the Jetson Nano device. Run the MaskCam container using the following command, where `MQTT_BROKER_IP` is set to the IP of your server. (If you're using an AWS EC2 server, make sure to configure port `1883` for inbound and outbound traffic before running this command.)\n\n```\n# Run with MQTT_BROKER_IP, MQTT_DEVICE_NAME, and MASKCAM_DEVICE_ADDRESS\nsudo docker run --runtime nvidia --privileged --rm -it --env MQTT_BROKER_IP=<server IP> --env MQTT_DEVICE_NAME=my-jetson-1 --env MASKCAM_DEVICE_ADDRESS=<your-jetson-ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\nAnd that's it. If the device has access to the server's IP, you should see some successful connection messages in the output logs, and your device should appear in the drop-down menu of the frontend (reload the page if you don't see it). In the frontend, select `Group data by: Second` and hit `Refresh status` to see how the plot changes when new data arrives.\n\nCheck the next section if the MQTT connection is not established from the device to the server.\n\n### Checking MQTT Connection\nIf you're running the MQTT broker on a machine in your local network, make sure its IP is accessible from the Jetson device:\n```\nping <local server IP>\n```\n\n*NOTE:* Remember to use the network address of the computer you set up the server on, which you can check using the `ifconfig` command and looking for an address that should start with `192.168...`, `10...` or `172...`.\n\nIf you're setting up a remote server and using its public IP to connect from your device, chances are that port `1883` is not properly opened for inbound and outbound traffic.\nTo check that the port is correctly configured, use `nc` from a local machine or your Jetson:\n```\nnc -vz <server IP> 1883\n```\nRemember that you also need to open port `8501` to access the web server frontend from a web browser, as explained in the [server configuration section](#running-the-mqtt-broker-and-web-server) (but that's not relevant for the MQTT communication with the device).\n\n\n## Working With the MaskCam Container\n### Development Mode: Manually Running MaskCam\nIf you want to play around with the code, you probably don't want the container to automatically start running the `maskcam_run.py` script.\nThe easiest way to achieve that is by defining the environment variable `DEV_MODE=1`:\n```\ndocker run --runtime nvidia --privileged --rm -it --env DEV_MODE=1 -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\nThis will cause the container to start a `/bin/bash` prompt (see [docker/start.sh](docker/start.sh) for details), from which you can run the script manually, or any of its sub-modules as standalone processes:
\n\n```\n# e.g: Run with a different input instead of default `/dev/video0`\n./maskcam_run.py v4l2:///dev/video1\n\n# e.g: Disable tracker to visualize raw detections and scores\nMASKCAM_DISABLE_TRACKER=1 ./maskcam_run.py\n```\n\n### Debugging: Running MaskCam Modules as Standalone Processes\nThe script `maskcam_run.py`, which is the main entrypoint for the MaskCam software, has two roles:\n - Handles all the MQTT communication (sending stats and receiving commands)\n - Orchestrates all other processes that live under `maskcam/maskcam_*.py`.\n\nBut you can actually run any of those modules as standalone processes, which can be easier for debugging.\n\nYou need to set `DEV_MODE=1` as explained in the previous section to access the container prompt, and then you can run the Python modules:\n\n```\n# e.g: Run only the static file server process\npython3 -m maskcam.maskcam_fileserver\n# e.g: Serve another directory to test\npython3 -m maskcam.maskcam_fileserver /tmp\n\n# e.g: Run only the inference and streaming processes\npython3 -m maskcam.maskcam_streaming &\n# Hit enter until you get a prompt and then:\npython3 -m maskcam.maskcam_inference\n```\n\n**Note:** In the last example, `maskcam_streaming` is running in the background, so it will not terminate if you press `Ctrl+C` (only `maskcam_inference` will, since it's running in the foreground).\n\nTo check that the streaming is still running and then bring it to the foreground to terminate it, run:\n```\njobs\nfg %1\n# Now you can hit Ctrl+C to terminate streaming\n```\n\n
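If you'd rather not bring the job to the foreground first, standard shell job control can also terminate it directly:\n\n```\n# Send SIGTERM to background job number 1\nkill %1\n```\n\n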
## Additional Information\nFurther information about working with and customizing MaskCam is provided on separate pages in the [docs](docs) folder. This section gives a brief description and link to each page.\n\n### Running on Jetson Nano Developer Kit Using BalenaOS\n[BalenaOS](https://www.balena.io/os/) is a lightweight operating system designed for running containers on embedded devices. It provides several advantages for fleet deployment and management, especially when combined with balena's balenaCloud management system. If you'd like to try running MaskCam with balenaOS instead of JetPack OS on your Jetson Nano, please follow the instructions at [BalenaOS-DevKit-Nano-Setup.md](docs/BalenaOS-DevKit-Nano-Setup.md).\n\n### Custom Container Development\nMaskCam is intended to be a reference design for any connected smart camera application. You can create your own application by starting from our pre-built container, modifying it to add the code files and packages needed for your program, and then re-building the container. The [Custom-Container-Development.md](docs/Custom-Container-Development.md) page gives instructions on how to build your own container based on MaskCam.\n\n#### Building From Source on Jetson Nano Developer Kit\nPlease see [How to Build your Own Container from Source on the Jetson Nano](https://github.com/bdtinc/maskcam/blob/main/docs/Custom-Container-Development.md#how-to-build-your-own-container-from-source-on-the-jetson-nano) for instructions on how to build a custom MaskCam container on your Jetson Nano Developer Kit.\n\n#### Using Your Own Detection Model\nPlease see [How to Use Your Own Detection Model](https://github.com/bdtinc/maskcam/blob/main/docs/Custom-Container-Development.md#how-to-use-your-own-detection-model) for instructions on how to use your own detection model rather than our mask detection model.\n\n### Installing MaskCam Manually (Without a Container)\nMaskCam can also be installed manually, rather than by downloading our pre-built container. Using a manual installation of MaskCam can help with development if you'd prefer not to work with containers. If you'd like to install MaskCam without using containers, please see [docs/Manual-Dependencies-Installation.md](docs/Manual-Dependencies-Installation.md).\n\n### Running on Jetson Nano with Photon Carrier Board\nFor our hardware prototype of MaskCam, we used a Jetson Nano module and a [Connect Tech Photon carrier board](https://connecttech.com/product/photon-jetson-nano-ai-camera-platform/), rather than the Jetson Nano Developer Kit. We used the Photon because the Developer Kit is not sold or warrantied for production use. Using the Photon allowed us to quickly create a production-ready prototype using off-the-shelf hardware. If you have a Photon carrier board and Jetson Nano module, you can install MaskCam on them by using the setup instructions at [docs/Photon-Nano-Setup.md](docs/Photon-Nano-Setup.md).\n\n### Useful Development Scripts\nDuring development, some scripts were produced which might be useful for other developers to debug or update the software. These include an MQTT sniffer, a script to run the TensorRT model on images, and a script to convert a model trained with the original YOLO Darknet implementation to TensorRT format. Basic usage for all these tools is covered in [docs/Useful-Development-Scripts.md](docs/Useful-Development-Scripts.md).\n\n\n## Troubleshooting Common Errors\nIf you run into any errors or issues while working with MaskCam, this section gives common errors and their solutions.\n\nMaskCam consists of many different processes running in parallel. As a consequence, when there's an error in a particular process, all of them will be sent termination signals so that they finish gracefully. This means that you need to scroll up through the output to find the original error that caused the failure. It should stand out, flagged as a red **ERROR** log entry, followed by the name of the process that failed and a message.\n\n#### Error: camera not connected/not recognized\nIf you see an error containing the message `Cannot identify device '/dev/video0'`, among other Gst and v4l messages, it means the program couldn't find the camera device. Make sure your camera is connected to the Nano and recognized by the host Ubuntu OS by issuing `ls /dev` and checking that `/dev/video0` is present in the output.\n\n
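For example, on the Nano host:\n\n```\n# List the video devices recognized by the OS\nls /dev | grep video\n```\n\n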
#### Error: not running in privileged mode\nIn this case, you'll see a bunch of annoying messages like:\n```\nError: Can't initialize nvrm channel\nCouldn't create ddkvic Session: Cannot allocate memory\nnvbuf_utils: Could not create Default NvBufferSession\n```\nYou'll probably see multiple failures in other MaskCam processes as well. To resolve these errors, make sure you're running docker with the `--privileged` flag, as described in the [first section](#running-maskcam-from-a-container-on-a-jetson-nano-developer-kit).\n\n#### Error: reason not negotiated/camera capabilities\nIf you get an error that looks like `v4l-camera-source / reason not-negotiated`, then the problem is that the USB camera you're using doesn't support the default `camera-framerate=30` (frames per second). If you don't have another camera, try running the script `utils/gst_capabilities.sh` and find the lines with type `video/x-raw ...`\n\nFind any suitable `framerate=X/1` (with `X` being an integer like 24, 15, etc.) and set the corresponding configuration parameter with `--env MASKCAM_CAMERA_FRAMERATE=X` (see the [previous section](#setting-device-configuration-parameters)).\n\n#### Error: streaming or file server not accessible (nothing else seems to fail)\nMake sure you're mapping the right ports from the container, with the `-p host_port:container_port` parameters indicated in the previous sections. The default port numbers, which should be exposed by the container, are configured in [maskcam_config.txt](maskcam_config.txt) as:\n```\nfileserver-port=8080\nstreaming-port=8554\nmqtt-broker-port=1883\n```\nThese port mappings are why we use `docker run ... -p 1883:1883 -p 8080:8080 -p 8554:8554 ...` with the run command. Remember that all the ports can be overridden using environment variables, as described in the [previous section](#setting-device-configuration-parameters). Other ports like `udp-port-*` are not intended to be accessible from outside the container; they are used for communication between the inference process and the streaming and file-saving processes.\n\n#### Other Errors\nSometimes after restarting the process or the whole docker container many times, some GPU resources can get stuck and cause unexpected errors. If that's the case, try rebooting the device and running the container again. If you find that the container fails systematically after running some sequence, please don't hesitate to [report an Issue](https://github.com/bdtinc/maskcam/issues) with the relevant context and we'll try to reproduce and fix it.\n\n## Questions? Need Help?\nEmail us at maskcam@bdti.com, and be sure to check out our [independent report on the development of MaskCam](https://bdti.com/maskcam)!\n\n"
  },
  {
    "path": "deepstream_plugin_yolov4/Makefile",
    "content": "################################################################################\n# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nCUDA_VER?=\nifeq ($(CUDA_VER),)\n  $(error \"CUDA_VER is not set\")\nendif\nCC:= g++\nNVCC:=/usr/local/cuda-$(CUDA_VER)/bin/nvcc\n\nCFLAGS:= -Wall -std=c++11 -shared -fPIC -Wno-error=deprecated-declarations\nCFLAGS+= -I../../includes -I/usr/local/cuda-$(CUDA_VER)/include -I/opt/nvidia/deepstream/deepstream-5.0/sources/includes\n\nLIBS:= -lnvinfer_plugin -lnvinfer -lnvparsers -L/usr/local/cuda-$(CUDA_VER)/lib64 -lcudart -lcublas -lstdc++fs -L/opt/nvidia/deepstream/deepstream-5.0/lib\nLFLAGS:= -shared -Wl,--start-group $(LIBS) -Wl,--end-group\n\nINCS:= $(wildcard *.h)\nSRCFILES:= nvdsinfer_yolo_engine.cpp \\\n           nvdsparsebbox_Yolo.cpp   \\\n           yoloPlugins.cpp    \\\n           trt_utils.cpp              \\\n           yolo.cpp              \\\n           kernels.cu\nTARGET_LIB:= libnvdsinfer_custom_impl_Yolo.so\n\nTARGET_OBJS:= $(SRCFILES:.cpp=.o)\nTARGET_OBJS:= $(TARGET_OBJS:.cu=.o)\n\nall: $(TARGET_LIB)\n\n%.o: %.cpp $(INCS) Makefile\n\t$(CC) -c -o $@ $(CFLAGS) $<\n\n%.o: %.cu $(INCS) Makefile\n\t$(NVCC) -c -o $@ --compiler-options '-fPIC' $<\n\n$(TARGET_LIB) : $(TARGET_OBJS)\n\t$(CC) -o $@  $(TARGET_OBJS) $(LFLAGS)\n\nclean:\n\trm -rf $(TARGET_LIB)\n"
  },
  {
    "path": "deepstream_plugin_yolov4/README.md",
    "content": "## YOLOv4 plugin for DeepStream\nThis plugin was obtained from: https://github.com/Tianxiaomo/pytorch-YOLOv4/tree/master/DeepStream\n\nIt must be compiled locally on the jetson device:\n```\nexport CUDA_VER=10.2\nmake\n```\n"
  },
  {
    "path": "deepstream_plugin_yolov4/kernels.cu",
    "content": "/*\n * Copyright (c) 2018-2019 NVIDIA Corporation.  All rights reserved.\n *\n * NVIDIA Corporation and its licensors retain all intellectual property\n * and proprietary rights in and to this software, related documentation\n * and any modifications thereto.  Any use, reproduction, disclosure or\n * distribution of this software and related documentation without an express\n * license agreement from NVIDIA Corporation is strictly prohibited.\n *\n */\n\n#include <cuda.h>\n#include <cuda_runtime.h>\n#include <stdint.h>\n#include <stdio.h>\n#include <string.h>\n\ninline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }\n\n__global__ void gpuYoloLayerV3(const float* input, float* output, const uint gridSize, const uint numOutputClasses,\n                               const uint numBBoxes)\n{\n    uint x_id = blockIdx.x * blockDim.x + threadIdx.x;\n    uint y_id = blockIdx.y * blockDim.y + threadIdx.y;\n    uint z_id = blockIdx.z * blockDim.z + threadIdx.z;\n\n    if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes))\n    {\n        return;\n    }\n\n    const int numGridCells = gridSize * gridSize;\n    const int bbindex = y_id * gridSize + x_id;\n\n    output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]\n        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]);\n\n    output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]\n        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]);\n\n    output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]\n        = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]);\n\n    output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]\n        = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]);\n\n    output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]\n        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);\n\n    for (uint i = 0; i < numOutputClasses; ++i)\n    {\n        output[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]\n            = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);\n    }\n}\n\ncudaError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize,\n                            const uint& numOutputClasses, const uint& numBBoxes,\n                            uint64_t outputSize, cudaStream_t stream);\n\ncudaError_t cudaYoloLayerV3(const void* input, void* output, const uint& batchSize, const uint& gridSize,\n                            const uint& numOutputClasses, const uint& numBBoxes,\n                            uint64_t outputSize, cudaStream_t stream)\n{\n    dim3 threads_per_block(16, 16, 4);\n    dim3 number_of_blocks((gridSize / threads_per_block.x) + 1,\n                          (gridSize / threads_per_block.y) + 1,\n                          (numBBoxes / threads_per_block.z) + 1);\n    for (unsigned int batch = 0; batch < batchSize; ++batch)\n    {\n        gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>(\n            reinterpret_cast<const float*>(input) + (batch * outputSize),\n            reinterpret_cast<float*>(output) + (batch * outputSize), gridSize, numOutputClasses,\n            numBBoxes);\n    }\n    return cudaGetLastError();\n}\n"
  },
  {
    "path": "deepstream_plugin_yolov4/nvdsinfer_yolo_engine.cpp",
    "content": "/*\n * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include \"nvdsinfer_custom_impl.h\"\n#include \"nvdsinfer_context.h\"\n#include \"yoloPlugins.h\"\n#include \"yolo.h\"\n\n#include <algorithm>\n\n#define USE_CUDA_ENGINE_GET_API 1\n\nstatic bool getYoloNetworkInfo (NetworkInfo &networkInfo, const NvDsInferContextInitParams* initParams)\n{\n    std::string yoloCfg = initParams->customNetworkConfigFilePath;\n    std::string yoloType;\n\n    std::transform (yoloCfg.begin(), yoloCfg.end(), yoloCfg.begin(), [] (uint8_t c) {\n        return std::tolower (c);});\n\n    if (yoloCfg.find(\"yolov2\") != std::string::npos) {\n        if (yoloCfg.find(\"yolov2-tiny\") != std::string::npos)\n            yoloType = \"yolov2-tiny\";\n        else\n            yoloType = \"yolov2\";\n    } else if (yoloCfg.find(\"yolov3\") != std::string::npos) {\n        if (yoloCfg.find(\"yolov3-tiny\") != std::string::npos)\n            yoloType = \"yolov3-tiny\";\n        else\n            yoloType = \"yolov3\";\n    } else {\n        std::cerr << \"Yolo type is not defined from config file name:\"\n                  << yoloCfg << std::endl;\n        return false;\n    }\n\n    networkInfo.networkType     = yoloType;\n    networkInfo.configFilePath  = initParams->customNetworkConfigFilePath;\n    networkInfo.wtsFilePath     = initParams->modelFilePath;\n    networkInfo.deviceType      = (initParams->useDLA ? 
\"kDLA\" : \"kGPU\");\n    networkInfo.inputBlobName   = \"data\";\n\n    if (networkInfo.configFilePath.empty() ||\n        networkInfo.wtsFilePath.empty()) {\n        std::cerr << \"Yolo config file or weights file is NOT specified.\"\n                  << std::endl;\n        return false;\n    }\n\n    if (!fileExists(networkInfo.configFilePath) ||\n        !fileExists(networkInfo.wtsFilePath)) {\n        std::cerr << \"Yolo config file or weights file is NOT exist.\"\n                  << std::endl;\n        return false;\n    }\n\n    return true;\n}\n\n#if !USE_CUDA_ENGINE_GET_API\nIModelParser* NvDsInferCreateModelParser(\n    const NvDsInferContextInitParams* initParams) {\n    NetworkInfo networkInfo;\n    if (!getYoloNetworkInfo(networkInfo, initParams)) {\n      return nullptr;\n    }\n\n    return new Yolo(networkInfo);\n}\n#else\nextern \"C\"\nbool NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder * const builder,\n        const NvDsInferContextInitParams * const initParams,\n        nvinfer1::DataType dataType,\n        nvinfer1::ICudaEngine *& cudaEngine);\n\nextern \"C\"\nbool NvDsInferYoloCudaEngineGet(nvinfer1::IBuilder * const builder,\n        const NvDsInferContextInitParams * const initParams,\n        nvinfer1::DataType dataType,\n        nvinfer1::ICudaEngine *& cudaEngine)\n{\n    NetworkInfo networkInfo;\n    if (!getYoloNetworkInfo(networkInfo, initParams)) {\n      return false;\n    }\n\n    Yolo yolo(networkInfo);\n    cudaEngine = yolo.createEngine (builder);\n    if (cudaEngine == nullptr)\n    {\n        std::cerr << \"Failed to build cuda engine on \"\n                  << networkInfo.configFilePath << std::endl;\n        return false;\n    }\n\n    return true;\n}\n#endif\n"
  },
  {
    "path": "deepstream_plugin_yolov4/nvdsparsebbox_Yolo.cpp",
    "content": "/*\n * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include <algorithm>\n#include <cassert>\n#include <cmath>\n#include <cstring>\n#include <fstream>\n#include <iostream>\n#include <unordered_map>\n#include \"nvdsinfer_custom_impl.h\"\n#include \"trt_utils.h\"\n\nstatic const int NUM_CLASSES_YOLO = 4; // This is just for checkings, keep updated by hand\n\nextern \"C\" bool NvDsInferParseCustomYoloV3(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\nextern \"C\" bool NvDsInferParseCustomYoloV3Tiny(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\nextern \"C\" bool NvDsInferParseCustomYoloV2(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\nextern \"C\" bool NvDsInferParseCustomYoloV2Tiny(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\nextern \"C\" bool NvDsInferParseCustomYoloTLT(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\nextern \"C\" bool NvDsInferParseCustomYoloV4(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList);\n\n/* This is a sample bounding box parsing function for the sample YoloV3 detector model */\nstatic NvDsInferParseObjectInfo convertBBox(const float &bx, const float &by, const float &bw,\n                                            const float &bh, const int &stride, const uint &netW,\n                                            const uint &netH)\n{\n    NvDsInferParseObjectInfo b;\n    // Restore 
coordinates to network input resolution\n    float xCenter = bx * stride;\n    float yCenter = by * stride;\n    float x0 = xCenter - bw / 2;\n    float y0 = yCenter - bh / 2;\n    float x1 = x0 + bw;\n    float y1 = y0 + bh;\n\n    x0 = clamp(x0, 0, netW);\n    y0 = clamp(y0, 0, netH);\n    x1 = clamp(x1, 0, netW);\n    y1 = clamp(y1, 0, netH);\n\n    b.left = x0;\n    b.width = clamp(x1 - x0, 0, netW);\n    b.top = y0;\n    b.height = clamp(y1 - y0, 0, netH);\n\n    return b;\n}\n\nstatic void addBBoxProposal(const float bx, const float by, const float bw, const float bh,\n                            const uint stride, const uint &netW, const uint &netH, const int maxIndex,\n                            const float maxProb, std::vector<NvDsInferParseObjectInfo> &binfo)\n{\n    NvDsInferParseObjectInfo bbi = convertBBox(bx, by, bw, bh, stride, netW, netH);\n    if (bbi.width < 1 || bbi.height < 1)\n        return;\n\n    bbi.detectionConfidence = maxProb;\n    bbi.classId = maxIndex;\n    binfo.push_back(bbi);\n}\n\nstatic std::vector<NvDsInferParseObjectInfo>\ndecodeYoloV2Tensor(\n    const float *detections, const std::vector<float> &anchors,\n    const uint gridSizeW, const uint gridSizeH, const uint stride, const uint numBBoxes,\n    const uint numOutputClasses, const uint &netW,\n    const uint &netH)\n{\n    std::vector<NvDsInferParseObjectInfo> binfo;\n    for (uint y = 0; y < gridSizeH; ++y)\n    {\n        for (uint x = 0; x < gridSizeW; ++x)\n        {\n            for (uint b = 0; b < numBBoxes; ++b)\n            {\n                const float pw = anchors[b * 2];\n                const float ph = anchors[b * 2 + 1];\n\n                const int numGridCells = gridSizeH * gridSizeW;\n                const int bbindex = y * gridSizeW + x;\n                const float bx = x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)];\n                const float by = y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)];\n                const float bw = pw * exp(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)]);\n                const float bh = ph * exp(detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)]);\n\n                const float objectness = detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)];\n\n                float maxProb = 0.0f;\n                int maxIndex = -1;\n\n                for (uint i = 0; i < numOutputClasses; ++i)\n                {\n                    float prob = (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]);\n\n                    if (prob > maxProb)\n                    {\n                        maxProb = prob;\n                        maxIndex = i;\n                    }\n                }\n                maxProb = objectness * maxProb;\n\n                addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo);\n            }\n        }\n    }\n    return binfo;\n}\n\nstatic std::vector<NvDsInferParseObjectInfo>\ndecodeYoloV3Tensor(\n    const float *detections, const std::vector<int> &mask, const std::vector<float> &anchors,\n    const uint gridSizeW, const uint gridSizeH, const uint stride, const uint numBBoxes,\n    const uint numOutputClasses, const uint &netW,\n    const uint &netH)\n{\n    std::vector<NvDsInferParseObjectInfo> binfo;\n    for (uint y = 0; y < gridSizeH; ++y)\n    {\n        for (uint x = 0; x < gridSizeW; ++x)\n        {\n            for (uint b = 0; b < numBBoxes; ++b)\n            
{\n                const float pw = anchors[mask[b] * 2];\n                const float ph = anchors[mask[b] * 2 + 1];\n\n                const int numGridCells = gridSizeH * gridSizeW;\n                const int bbindex = y * gridSizeW + x;\n                const float bx = x + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 0)];\n                const float by = y + detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 1)];\n                const float bw = pw * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 2)];\n                const float bh = ph * detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 3)];\n\n                const float objectness = detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + 4)];\n\n                float maxProb = 0.0f;\n                int maxIndex = -1;\n\n                for (uint i = 0; i < numOutputClasses; ++i)\n                {\n                    float prob = (detections[bbindex + numGridCells * (b * (5 + numOutputClasses) + (5 + i))]);\n\n                    if (prob > maxProb)\n                    {\n                        maxProb = prob;\n                        maxIndex = i;\n                    }\n                }\n                maxProb = objectness * maxProb;\n\n                addBBoxProposal(bx, by, bw, bh, stride, netW, netH, maxIndex, maxProb, binfo);\n            }\n        }\n    }\n    return binfo;\n}\n\nstatic inline std::vector<const NvDsInferLayerInfo *>\nSortLayers(const std::vector<NvDsInferLayerInfo> &outputLayersInfo)\n{\n    std::vector<const NvDsInferLayerInfo *> outLayers;\n    for (auto const &layer : outputLayersInfo)\n    {\n        outLayers.push_back(&layer);\n    }\n    std::sort(outLayers.begin(), outLayers.end(),\n              [](const NvDsInferLayerInfo *a, const NvDsInferLayerInfo *b) {\n                  return a->inferDims.d[1] < b->inferDims.d[1];\n              });\n    return outLayers;\n}\n\nstatic bool NvDsInferParseYoloV3(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList,\n    const std::vector<float> &anchors,\n    const std::vector<std::vector<int>> &masks)\n{\n    const uint kNUM_BBOXES = 3;\n\n    const std::vector<const NvDsInferLayerInfo *> sortedLayers =\n        SortLayers(outputLayersInfo);\n\n    if (sortedLayers.size() != masks.size())\n    {\n        std::cerr << \"ERROR: yoloV3 output layer.size: \" << sortedLayers.size()\n                  << \" does not match mask.size: \" << masks.size() << std::endl;\n        return false;\n    }\n\n    if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)\n    {\n        std::cerr << \"WARNING: Num classes mismatch. 
Configured: \"\n                  << detectionParams.numClassesConfigured\n                  << \", detected by network: \" << NUM_CLASSES_YOLO << std::endl;\n    }\n\n    std::vector<NvDsInferParseObjectInfo> objects;\n\n    for (uint idx = 0; idx < masks.size(); ++idx)\n    {\n        const NvDsInferLayerInfo &layer = *sortedLayers[idx]; // 255 x Grid x Grid\n\n        assert(layer.inferDims.numDims == 3);\n        const uint gridSizeH = layer.inferDims.d[1];\n        const uint gridSizeW = layer.inferDims.d[2];\n        const uint stride = DIVUP(networkInfo.width, gridSizeW);\n        assert(stride == DIVUP(networkInfo.height, gridSizeH));\n\n        std::vector<NvDsInferParseObjectInfo> outObjs =\n            decodeYoloV3Tensor((const float *)(layer.buffer), masks[idx], anchors, gridSizeW, gridSizeH, stride, kNUM_BBOXES,\n                               NUM_CLASSES_YOLO, networkInfo.width, networkInfo.height);\n        objects.insert(objects.end(), outObjs.begin(), outObjs.end());\n    }\n\n    objectList = objects;\n\n    return true;\n}\n\nstatic NvDsInferParseObjectInfo convertBBoxYoloV4(const float &bx1, const float &by1, const float &bx2,\n                                                  const float &by2, const uint &netW, const uint &netH)\n{\n    NvDsInferParseObjectInfo b;\n    // Restore coordinates to network input resolution\n\n    float x1 = bx1 * netW;\n    float y1 = by1 * netH;\n    float x2 = bx2 * netW;\n    float y2 = by2 * netH;\n\n    x1 = clamp(x1, 0, netW);\n    y1 = clamp(y1, 0, netH);\n    x2 = clamp(x2, 0, netW);\n    y2 = clamp(y2, 0, netH);\n\n    b.left = x1;\n    b.width = clamp(x2 - x1, 0, netW);\n    b.top = y1;\n    b.height = clamp(y2 - y1, 0, netH);\n\n    return b;\n}\n\nstatic void addBBoxProposalYoloV4(const float bx, const float by, const float bw, const float bh,\n                                  const uint &netW, const uint &netH, const int maxIndex,\n                                  const float maxProb, std::vector<NvDsInferParseObjectInfo> &binfo)\n{\n    NvDsInferParseObjectInfo bbi = convertBBoxYoloV4(bx, by, bw, bh, netW, netH);\n    if (bbi.width < 1 || bbi.height < 1)\n        return;\n\n    bbi.detectionConfidence = maxProb;\n    bbi.classId = maxIndex;\n    binfo.push_back(bbi);\n}\n\nstatic std::vector<NvDsInferParseObjectInfo>\ndecodeYoloV4Tensor(\n    const float *boxes, const float *scores,\n    const uint num_bboxes, NvDsInferParseDetectionParams const &detectionParams,\n    const uint &netW, const uint &netH)\n{\n    std::vector<NvDsInferParseObjectInfo> binfo;\n\n    uint bbox_location = 0;\n    uint score_location = 0;\n    for (uint b = 0; b < num_bboxes; ++b)\n    {\n        float bx1 = boxes[bbox_location];\n        float by1 = boxes[bbox_location + 1];\n        float bx2 = boxes[bbox_location + 2];\n        float by2 = boxes[bbox_location + 3];\n\n        float maxProb = 0.0f;\n        int maxIndex = -1;\n\n        for (uint c = 0; c < detectionParams.numClassesConfigured; ++c)\n        {\n            float prob = scores[score_location + c];\n            if (prob > maxProb)\n            {\n                maxProb = prob;\n                maxIndex = c;\n            }\n        }\n\n        // maxIndex stays -1 when every class score is zero; guard it before\n        // using it to index the per-class thresholds\n        if ((maxIndex >= 0) && (maxProb > detectionParams.perClassPreclusterThreshold[maxIndex]))\n        {\n            addBBoxProposalYoloV4(bx1, by1, bx2, by2, netW, netH, maxIndex, maxProb, binfo);\n        }\n\n        bbox_location += 4;\n        score_location += detectionParams.numClassesConfigured;\n    }\n\n    return binfo;\n}\n\n/* C-linkage to prevent name-mangling */\n\nstatic bool NvDsInferParseYoloV4(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)\n    {\n        std::cerr << \"WARNING: Num classes mismatch. Configured: \"\n                  << detectionParams.numClassesConfigured\n                  << \", detected by network: \" << NUM_CLASSES_YOLO << std::endl;\n    }\n\n    std::vector<NvDsInferParseObjectInfo> objects;\n\n    // Only the first two output layers are used; indexing a third one was an\n    // out-of-bounds read on engines that expose just boxes + scores\n    if (outputLayersInfo.size() < 2)\n    {\n        std::cerr << \"ERROR: yoloV4 parser expects at least 2 output layers (boxes, scores), got \"\n                  << outputLayersInfo.size() << std::endl;\n        return false;\n    }\n\n    const NvDsInferLayerInfo &boxes = outputLayersInfo[0];  // num_boxes x 4\n    const NvDsInferLayerInfo &scores = outputLayersInfo[1]; // num_boxes x num_classes\n    // 3 dimensional: [num_boxes, 1, 4]\n    assert(boxes.inferDims.numDims == 3);\n    // 2 dimensional: [num_boxes, num_classes]\n    assert(scores.inferDims.numDims == 2);\n\n    // The second dimension should be num_classes\n    assert(detectionParams.numClassesConfigured == scores.inferDims.d[1]);\n\n    uint num_bboxes = boxes.inferDims.d[0];\n\n    std::vector<NvDsInferParseObjectInfo> outObjs =\n        decodeYoloV4Tensor(\n            (const float *)(boxes.buffer), (const float *)(scores.buffer), num_bboxes, detectionParams,\n            networkInfo.width, networkInfo.height);\n\n    objects.insert(objects.end(), outObjs.begin(), outObjs.end());\n\n    objectList = objects;\n\n    return true;\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloV4(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    return NvDsInferParseYoloV4(\n        outputLayersInfo, networkInfo, detectionParams, objectList);\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloV3(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    static const std::vector<float> kANCHORS = {\n        10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0,\n        45.0, 59.0, 119.0, 116.0, 90.0, 156.0, 198.0, 373.0, 326.0};\n    static const std::vector<std::vector<int>> kMASKS = {\n        {6, 7, 8},\n        {3, 4, 5},\n        {0, 1, 2}};\n    return NvDsInferParseYoloV3(\n        outputLayersInfo, networkInfo, detectionParams, objectList,\n        kANCHORS, kMASKS);\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloV3Tiny(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    static const std::vector<float> kANCHORS = {\n        10, 14, 23, 27, 37, 58, 81, 82, 135, 169, 344, 319};\n    static const std::vector<std::vector<int>> kMASKS = {\n        {3, 4, 5},\n        // this model's outputs match masks {1, 2, 3} rather than darknet's usual {0, 1, 2}\n        {1, 2, 3}};\n\n    return NvDsInferParseYoloV3(\n        outputLayersInfo, networkInfo, detectionParams, objectList,\n        kANCHORS, kMASKS);\n}\n\nstatic bool NvDsInferParseYoloV2(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    // copy anchor data from yolov2.cfg file\n    std::vector<float> anchors = {0.57273, 0.677385, 1.87446, 2.06253, 3.33843,\n                                  5.47434, 7.88282, 3.52778, 9.77052, 9.16828};\n    const uint kNUM_BBOXES = 5;\n\n    if (outputLayersInfo.empty())\n    {\n        std::cerr << \"Could not find output layer in bbox parsing\" << std::endl;\n        return false;\n    }\n    const NvDsInferLayerInfo &layer = outputLayersInfo[0];\n\n    if (NUM_CLASSES_YOLO != detectionParams.numClassesConfigured)\n    {\n        std::cerr << \"WARNING: Num classes mismatch. Configured: \"\n                  << detectionParams.numClassesConfigured\n                  << \", detected by network: \" << NUM_CLASSES_YOLO << std::endl;\n    }\n\n    assert(layer.inferDims.numDims == 3);\n    const uint gridSizeH = layer.inferDims.d[1];\n    const uint gridSizeW = layer.inferDims.d[2];\n    const uint stride = DIVUP(networkInfo.width, gridSizeW);\n    assert(stride == DIVUP(networkInfo.height, gridSizeH));\n    for (auto &anchor : anchors)\n    {\n        anchor *= stride;\n    }\n    std::vector<NvDsInferParseObjectInfo> objects =\n        decodeYoloV2Tensor((const float *)(layer.buffer), anchors, gridSizeW, gridSizeH, stride, kNUM_BBOXES,\n                           NUM_CLASSES_YOLO, networkInfo.width, networkInfo.height);\n\n    objectList = objects;\n\n    return true;\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloV2(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    return NvDsInferParseYoloV2(\n        outputLayersInfo, networkInfo, detectionParams, objectList);\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloV2Tiny(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n    return NvDsInferParseYoloV2(\n        outputLayersInfo, networkInfo, detectionParams, objectList);\n}\n\nextern \"C\" bool NvDsInferParseCustomYoloTLT(\n    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,\n    NvDsInferNetworkInfo const &networkInfo,\n    NvDsInferParseDetectionParams const &detectionParams,\n    std::vector<NvDsInferParseObjectInfo> &objectList)\n{\n\n    if (outputLayersInfo.size() != 4)\n    {\n        std::cerr << \"Mismatch in the number of output buffers. \"\n                  << \"Expected 4 output buffers, detected in the network: \"\n                  << outputLayersInfo.size() << std::endl;\n        return false;\n    }\n\n    const int topK = 200;\n    const int *keepCount = static_cast<const int *>(outputLayersInfo.at(0).buffer);\n    const float *boxes = static_cast<const float *>(outputLayersInfo.at(1).buffer);\n    const float *scores = static_cast<const float *>(outputLayersInfo.at(2).buffer);\n    const float *cls = static_cast<const float *>(outputLayersInfo.at(3).buffer);\n\n    // keep at most topK detections (size() is unsigned, topK is int)\n    for (int i = 0; (i < keepCount[0]) && (objectList.size() < static_cast<size_t>(topK)); ++i)\n    {\n        const float *loc = &boxes[0] + (i * 4);\n        const float *conf = &scores[0] + i;\n        const float *cls_id = &cls[0] + i;\n\n        // skip entries with invalid (> 1) confidence values\n        if (conf[0] > 1.001)\n            continue;\n\n        if ((loc[0] < 0) || (loc[1] < 0) || (loc[2] < 0) || (loc[3] < 0))\n            continue;\n\n        // loc is [x1, y1, x2, y2]: x bounds against width, y bounds against height\n        if ((loc[0] > networkInfo.width) || (loc[2] > networkInfo.width) || (loc[1] > networkInfo.height) || (loc[3] > networkInfo.height))\n            continue;\n\n        if ((loc[2] < loc[0]) || (loc[3] < loc[1]))\n            continue;\n\n        if (((loc[3] - loc[1]) > networkInfo.height) || ((loc[2] - loc[0]) > networkInfo.width))\n            continue;\n\n        NvDsInferParseObjectInfo curObj{static_cast<unsigned int>(cls_id[0]),\n                                        loc[0], loc[1], (loc[2] - loc[0]),\n                                        (loc[3] - loc[1]), conf[0]};\n        objectList.push_back(curObj);\n    }\n\n    return true;\n}\n\n/* Check that the custom functions have been defined correctly */\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV4);\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3);\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV3Tiny);\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2);\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloV2Tiny);\nCHECK_CUSTOM_PARSE_FUNC_PROTOTYPE(NvDsInferParseCustomYoloTLT);\n"
  },
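  {
    "path": "deepstream_plugin_yolov4/examples/yolo_grid_indexing_sketch.cpp",
    "content": "// NOTE: illustrative sketch added as a hedged example; this file is not part of the\n// original plugin sources. It mirrors only the indexing scheme used by\n// decodeYoloV3Tensor in nvdsparsebbox_Yolo.cpp: predictions are stored channel-major\n// as [b * (5 + numClasses) + k][y][x], so element (b, k, y, x) lives at\n// (y * gridW + x) + gridH * gridW * (b * (5 + numClasses) + k).\n#include <cstdio>\n#include <vector>\n\nint main()\n{\n    const unsigned gridH = 2, gridW = 2; // toy 2x2 grid\n    const unsigned numBBoxes = 1;        // one anchor box per cell\n    const unsigned numClasses = 3;       // channels per box: tx, ty, tw, th, obj + 3 classes\n    const unsigned numGridCells = gridH * gridW;\n    std::vector<float> detections(numBBoxes * (5 + numClasses) * numGridCells);\n\n    // Fill element (b=0, k, y, x) with the recognizable value k*100 + y*10 + x.\n    for (unsigned k = 0; k < 5 + numClasses; ++k)\n        for (unsigned y = 0; y < gridH; ++y)\n            for (unsigned x = 0; x < gridW; ++x)\n                detections[(y * gridW + x) + numGridCells * k] = k * 100 + y * 10 + x;\n\n    // Read back the objectness channel (k == 4) of cell (y=1, x=0),\n    // using exactly the parser's index expression.\n    const unsigned b = 0, y = 1, x = 0;\n    const unsigned bbindex = y * gridW + x;\n    float objectness = detections[bbindex + numGridCells * (b * (5 + numClasses) + 4)];\n    std::printf(\"objectness(y=1,x=0) = %.0f (expect 410)\\n\", objectness);\n    return 0;\n}\n"
  },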
  {
    "path": "deepstream_plugin_yolov4/trt_utils.cpp",
    "content": "/*\n * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include \"trt_utils.h\"\n\n#include <experimental/filesystem>\n#include <fstream>\n#include <iomanip>\n#include <functional>\n#include <algorithm>\n#include <math.h>\n\n#include \"NvInferPlugin.h\"\n\nstatic void leftTrim(std::string& s)\n{\n    s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int ch) { return !isspace(ch); }));\n}\n\nstatic void rightTrim(std::string& s)\n{\n    s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !isspace(ch); }).base(), s.end());\n}\n\nstd::string trim(std::string s)\n{\n    leftTrim(s);\n    rightTrim(s);\n    return s;\n}\n\nfloat clamp(const float val, const float minVal, const float maxVal)\n{\n    assert(minVal <= maxVal);\n    return std::min(maxVal, std::max(minVal, val));\n}\n\nbool fileExists(const std::string fileName, bool verbose)\n{\n    if (!std::experimental::filesystem::exists(std::experimental::filesystem::path(fileName)))\n    {\n        if (verbose) std::cout << \"File does not exist : \" << fileName << std::endl;\n        return false;\n    }\n    return true;\n}\n\nstd::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType)\n{\n    assert(fileExists(weightsFilePath));\n    std::cout << \"Loading pre-trained weights...\" << std::endl;\n    std::ifstream file(weightsFilePath, std::ios_base::binary);\n    assert(file.good());\n    std::string line;\n\n    if (networkType == \"yolov2\")\n    {\n        // Remove 4 int32 bytes of data from the stream belonging to the header\n        file.ignore(4 * 4);\n    }\n    else if ((networkType == \"yolov3\") || (networkType == \"yolov3-tiny\")\n             || (networkType == \"yolov2-tiny\"))\n    {\n        // Remove 5 int32 bytes of data from the stream belonging to the header\n        file.ignore(4 * 5);\n    }\n    else\n    {\n        std::cout << \"Invalid network type\" << std::endl;\n        assert(0);\n    }\n\n    std::vector<float> weights;\n    char floatWeight[4];\n    while (!file.eof())\n    {\n        file.read(floatWeight, 4);\n        assert(file.gcount() == 4);\n        weights.push_back(*reinterpret_cast<float*>(floatWeight));\n        if (file.peek() == std::istream::traits_type::eof()) break;\n    }\n    std::cout << \"Loading weights of \" << networkType << \" complete!\"\n              << std::endl;\n    std::cout << \"Total Number of weights 
read : \" << weights.size() << std::endl;\n    return weights;\n}\n\nstd::string dimsToString(const nvinfer1::Dims d)\n{\n    std::stringstream s;\n    assert(d.nbDims >= 1);\n    for (int i = 0; i < d.nbDims - 1; ++i)\n    {\n        s << std::setw(4) << d.d[i] << \" x\";\n    }\n    s << std::setw(4) << d.d[d.nbDims - 1];\n\n    return s.str();\n}\n\nvoid displayDimType(const nvinfer1::Dims d)\n{\n    std::cout << \"(\" << d.nbDims << \") \";\n    for (int i = 0; i < d.nbDims; ++i)\n    {\n        switch (d.type[i])\n        {\n        case nvinfer1::DimensionType::kSPATIAL: std::cout << \"kSPATIAL \"; break;\n        case nvinfer1::DimensionType::kCHANNEL: std::cout << \"kCHANNEL \"; break;\n        case nvinfer1::DimensionType::kINDEX: std::cout << \"kINDEX \"; break;\n        case nvinfer1::DimensionType::kSEQUENCE: std::cout << \"kSEQUENCE \"; break;\n        }\n    }\n    std::cout << std::endl;\n}\n\nint getNumChannels(nvinfer1::ITensor* t)\n{\n    nvinfer1::Dims d = t->getDimensions();\n    assert(d.nbDims == 3);\n\n    return d.d[0];\n}\n\nuint64_t get3DTensorVolume(nvinfer1::Dims inputDims)\n{\n    assert(inputDims.nbDims == 3);\n    return inputDims.d[0] * inputDims.d[1] * inputDims.d[2];\n}\n\nnvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,\n                                nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network)\n{\n    assert(block.at(\"type\") == \"maxpool\");\n    assert(block.find(\"size\") != block.end());\n    assert(block.find(\"stride\") != block.end());\n\n    int size = std::stoi(block.at(\"size\"));\n    int stride = std::stoi(block.at(\"stride\"));\n\n    nvinfer1::IPoolingLayer* pool\n        = network->addPooling(*input, nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{size, size});\n    assert(pool);\n    std::string maxpoolLayerName = \"maxpool_\" + std::to_string(layerIdx);\n    pool->setStride(nvinfer1::DimsHW{stride, stride});\n    pool->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);\n    pool->setName(maxpoolLayerName.c_str());\n\n    return pool;\n}\n\nnvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,\n                                   std::vector<float>& weights,\n                                   std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,\n                                   int& inputChannels, nvinfer1::ITensor* input,\n                                   nvinfer1::INetworkDefinition* network)\n{\n    assert(block.at(\"type\") == \"convolutional\");\n    assert(block.find(\"batch_normalize\") == block.end());\n    assert(block.at(\"activation\") == \"linear\");\n    assert(block.find(\"filters\") != block.end());\n    assert(block.find(\"pad\") != block.end());\n    assert(block.find(\"size\") != block.end());\n    assert(block.find(\"stride\") != block.end());\n\n    int filters = std::stoi(block.at(\"filters\"));\n    int padding = std::stoi(block.at(\"pad\"));\n    int kernelSize = std::stoi(block.at(\"size\"));\n    int stride = std::stoi(block.at(\"stride\"));\n    int pad;\n    if (padding)\n        pad = (kernelSize - 1) / 2;\n    else\n        pad = 0;\n    // load the convolution layer bias\n    nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, filters};\n    float* val = new float[filters];\n    for (int i = 0; i < filters; ++i)\n    {\n        val[i] = weights[weightPtr];\n        weightPtr++;\n    }\n    convBias.values = val;\n    trtWeights.push_back(convBias);\n    // load the convolutional layer 
weights\n    int size = filters * inputChannels * kernelSize * kernelSize;\n    nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};\n    val = new float[size];\n    for (int i = 0; i < size; ++i)\n    {\n        val[i] = weights[weightPtr];\n        weightPtr++;\n    }\n    convWt.values = val;\n    trtWeights.push_back(convWt);\n    nvinfer1::IConvolutionLayer* conv = network->addConvolution(\n        *input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);\n    assert(conv != nullptr);\n    std::string convLayerName = \"conv_\" + std::to_string(layerIdx);\n    conv->setName(convLayerName.c_str());\n    conv->setStride(nvinfer1::DimsHW{stride, stride});\n    conv->setPadding(nvinfer1::DimsHW{pad, pad});\n\n    return conv;\n}\n\nnvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,\n                                    std::vector<float>& weights,\n                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,\n                                    int& inputChannels, nvinfer1::ITensor* input,\n                                    nvinfer1::INetworkDefinition* network)\n{\n    assert(block.at(\"type\") == \"convolutional\");\n    assert(block.find(\"batch_normalize\") != block.end());\n    assert(block.at(\"batch_normalize\") == \"1\");\n    assert(block.at(\"activation\") == \"leaky\");\n    assert(block.find(\"filters\") != block.end());\n    assert(block.find(\"pad\") != block.end());\n    assert(block.find(\"size\") != block.end());\n    assert(block.find(\"stride\") != block.end());\n\n    bool batchNormalize, bias;\n    if (block.find(\"batch_normalize\") != block.end())\n    {\n        batchNormalize = (block.at(\"batch_normalize\") == \"1\");\n        bias = false;\n    }\n    else\n    {\n        batchNormalize = false;\n        bias = true;\n    }\n    // all conv_bn_leaky layers assume bias is false\n    assert(batchNormalize == true && bias == false);\n    UNUSED(batchNormalize);\n    UNUSED(bias);\n\n    int filters = std::stoi(block.at(\"filters\"));\n    int padding = std::stoi(block.at(\"pad\"));\n    int kernelSize = std::stoi(block.at(\"size\"));\n    int stride = std::stoi(block.at(\"stride\"));\n    int pad;\n    if (padding)\n        pad = (kernelSize - 1) / 2;\n    else\n        pad = 0;\n\n    /***** CONVOLUTION LAYER *****/\n    /*****************************/\n    // batch norm weights are before the conv layer\n    // load BN biases (bn_biases)\n    std::vector<float> bnBiases;\n    for (int i = 0; i < filters; ++i)\n    {\n        bnBiases.push_back(weights[weightPtr]);\n        weightPtr++;\n    }\n    // load BN weights\n    std::vector<float> bnWeights;\n    for (int i = 0; i < filters; ++i)\n    {\n        bnWeights.push_back(weights[weightPtr]);\n        weightPtr++;\n    }\n    // load BN running_mean\n    std::vector<float> bnRunningMean;\n    for (int i = 0; i < filters; ++i)\n    {\n        bnRunningMean.push_back(weights[weightPtr]);\n        weightPtr++;\n    }\n    // load BN running_var\n    std::vector<float> bnRunningVar;\n    for (int i = 0; i < filters; ++i)\n    {\n        // 1e-05 for numerical stability\n        bnRunningVar.push_back(sqrt(weights[weightPtr] + 1.0e-5));\n        weightPtr++;\n    }\n    // load Conv layer weights (GKCRS)\n    int size = filters * inputChannels * kernelSize * kernelSize;\n    nvinfer1::Weights convWt{nvinfer1::DataType::kFLOAT, nullptr, size};\n    float* val = new float[size];\n    for (int i = 0; i < 
size; ++i)\n    {\n        val[i] = weights[weightPtr];\n        weightPtr++;\n    }\n    convWt.values = val;\n    trtWeights.push_back(convWt);\n    nvinfer1::Weights convBias{nvinfer1::DataType::kFLOAT, nullptr, 0};\n    trtWeights.push_back(convBias);\n    nvinfer1::IConvolutionLayer* conv = network->addConvolution(\n        *input, filters, nvinfer1::DimsHW{kernelSize, kernelSize}, convWt, convBias);\n    assert(conv != nullptr);\n    std::string convLayerName = \"conv_\" + std::to_string(layerIdx);\n    conv->setName(convLayerName.c_str());\n    conv->setStride(nvinfer1::DimsHW{stride, stride});\n    conv->setPadding(nvinfer1::DimsHW{pad, pad});\n\n    /***** BATCHNORM LAYER *****/\n    /***************************/\n    size = filters;\n    // create the weights\n    nvinfer1::Weights shift{nvinfer1::DataType::kFLOAT, nullptr, size};\n    nvinfer1::Weights scale{nvinfer1::DataType::kFLOAT, nullptr, size};\n    nvinfer1::Weights power{nvinfer1::DataType::kFLOAT, nullptr, size};\n    float* shiftWt = new float[size];\n    for (int i = 0; i < size; ++i)\n    {\n        shiftWt[i]\n            = bnBiases.at(i) - ((bnRunningMean.at(i) * bnWeights.at(i)) / bnRunningVar.at(i));\n    }\n    shift.values = shiftWt;\n    float* scaleWt = new float[size];\n    for (int i = 0; i < size; ++i)\n    {\n        scaleWt[i] = bnWeights.at(i) / bnRunningVar[i];\n    }\n    scale.values = scaleWt;\n    float* powerWt = new float[size];\n    for (int i = 0; i < size; ++i)\n    {\n        powerWt[i] = 1.0;\n    }\n    power.values = powerWt;\n    trtWeights.push_back(shift);\n    trtWeights.push_back(scale);\n    trtWeights.push_back(power);\n    // Add the batch norm layers\n    nvinfer1::IScaleLayer* bn = network->addScale(\n        *conv->getOutput(0), nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);\n    assert(bn != nullptr);\n    std::string bnLayerName = \"batch_norm_\" + std::to_string(layerIdx);\n    bn->setName(bnLayerName.c_str());\n    /***** ACTIVATION LAYER *****/\n    /****************************/\n    nvinfer1::ITensor* bnOutput = bn->getOutput(0);\n    nvinfer1::IActivationLayer* leaky = network->addActivation(\n        *bnOutput, nvinfer1::ActivationType::kLEAKY_RELU);\n    leaky->setAlpha(0.1);\n    assert(leaky != nullptr);\n    std::string leakyLayerName = \"leaky_\" + std::to_string(layerIdx);\n    leaky->setName(leakyLayerName.c_str());\n\n    return leaky;\n}\n\nnvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,\n                                 std::vector<float>& weights,\n                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,\n                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network)\n{\n    assert(block.at(\"type\") == \"upsample\");\n    nvinfer1::Dims inpDims = input->getDimensions();\n    assert(inpDims.nbDims == 3);\n    assert(inpDims.d[1] == inpDims.d[2]);\n    int h = inpDims.d[1];\n    int w = inpDims.d[2];\n    int stride = std::stoi(block.at(\"stride\"));\n    // add pre multiply matrix as a constant\n    nvinfer1::Dims preDims{3,\n                           {1, stride * h, w},\n                           {nvinfer1::DimensionType::kCHANNEL, nvinfer1::DimensionType::kSPATIAL,\n                            nvinfer1::DimensionType::kSPATIAL}};\n    int size = stride * h * w;\n    nvinfer1::Weights preMul{nvinfer1::DataType::kFLOAT, nullptr, size};\n    float* preWt = new float[size];\n    /* (2*h * w)\n    [ [1, 0, ..., 0],\n      [1, 0, ..., 
0],\n      [0, 1, ..., 0],\n      [0, 1, ..., 0],\n      ...,\n      ...,\n      [0, 0, ..., 1],\n      [0, 0, ..., 1] ]\n    */\n    for (int i = 0, idx = 0; i < h; ++i)\n    {\n        for (int s = 0; s < stride; ++s)\n        {\n            for (int j = 0; j < w; ++j, ++idx)\n            {\n                preWt[idx] = (i == j) ? 1.0 : 0.0;\n            }\n        }\n    }\n    preMul.values = preWt;\n    trtWeights.push_back(preMul);\n    nvinfer1::IConstantLayer* preM = network->addConstant(preDims, preMul);\n    assert(preM != nullptr);\n    std::string preLayerName = \"preMul_\" + std::to_string(layerIdx);\n    preM->setName(preLayerName.c_str());\n    // add post multiply matrix as a constant\n    nvinfer1::Dims postDims{3,\n                            {1, h, stride * w},\n                            {nvinfer1::DimensionType::kCHANNEL, nvinfer1::DimensionType::kSPATIAL,\n                             nvinfer1::DimensionType::kSPATIAL}};\n    size = stride * h * w;\n    nvinfer1::Weights postMul{nvinfer1::DataType::kFLOAT, nullptr, size};\n    float* postWt = new float[size];\n    /* (h * 2*w)\n    [ [1, 1, 0, 0, ..., 0, 0],\n      [0, 0, 1, 1, ..., 0, 0],\n      ...,\n      ...,\n      [0, 0, 0, 0, ..., 1, 1] ]\n    */\n    for (int i = 0, idx = 0; i < h; ++i)\n    {\n        for (int j = 0; j < stride * w; ++j, ++idx)\n        {\n            postWt[idx] = (j / stride == i) ? 1.0 : 0.0;\n        }\n    }\n    postMul.values = postWt;\n    trtWeights.push_back(postMul);\n    nvinfer1::IConstantLayer* post_m = network->addConstant(postDims, postMul);\n    assert(post_m != nullptr);\n    std::string postLayerName = \"postMul_\" + std::to_string(layerIdx);\n    post_m->setName(postLayerName.c_str());\n    // add matrix multiply layers for upsampling\n    nvinfer1::IMatrixMultiplyLayer* mm1\n        = network->addMatrixMultiply(*preM->getOutput(0), nvinfer1::MatrixOperation::kNONE, *input,\n                                     nvinfer1::MatrixOperation::kNONE);\n    assert(mm1 != nullptr);\n    std::string mm1LayerName = \"mm1_\" + std::to_string(layerIdx);\n    mm1->setName(mm1LayerName.c_str());\n    nvinfer1::IMatrixMultiplyLayer* mm2\n        = network->addMatrixMultiply(*mm1->getOutput(0), nvinfer1::MatrixOperation::kNONE,\n                                     *post_m->getOutput(0), nvinfer1::MatrixOperation::kNONE);\n    assert(mm2 != nullptr);\n    std::string mm2LayerName = \"mm2_\" + std::to_string(layerIdx);\n    mm2->setName(mm2LayerName.c_str());\n    return mm2;\n}\n\nvoid printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,\n                    std::string layerOutput, std::string weightPtr)\n{\n    std::cout << std::setw(6) << std::left << layerIndex << std::setw(15) << std::left << layerName;\n    std::cout << std::setw(20) << std::left << layerInput << std::setw(20) << std::left\n              << layerOutput;\n    std::cout << std::setw(6) << std::left << weightPtr << std::endl;\n}\n"
  },
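  {
    "path": "deepstream_plugin_yolov4/examples/weights_layout_sketch.cpp",
    "content": "// NOTE: illustrative sketch added as a hedged example; this file is not part of the\n// original plugin sources. It shows the darknet .weights layout that loadWeights()\n// in trt_utils.cpp assumes: a header of int32 values (4 for yolov2, 5 for the\n// yolov3 family, matching file.ignore(4 * 4) vs. file.ignore(4 * 5)), followed by\n// raw little-endian float32 weights until end of file.\n#include <cstdint>\n#include <cstdio>\n#include <fstream>\n#include <vector>\n\nint main()\n{\n    // Write a minimal yolov3-style file: 5 int32 header values + 3 floats.\n    {\n        std::ofstream out(\"toy.weights\", std::ios::binary);\n        int32_t header[5] = {0, 2, 0, 0, 0}; // major, minor, revision, seen (8 bytes)\n        out.write(reinterpret_cast<const char*>(header), sizeof(header));\n        float w[3] = {0.5f, -1.25f, 3.0f};\n        out.write(reinterpret_cast<const char*>(w), sizeof(w));\n    }\n\n    // Read it back the way loadWeights does: skip the header, then pull\n    // 4-byte floats until EOF.\n    std::ifstream file(\"toy.weights\", std::ios::binary);\n    file.ignore(4 * 5);\n    std::vector<float> weights;\n    char buf[4];\n    while (file.read(buf, 4))\n        weights.push_back(*reinterpret_cast<float*>(buf));\n\n    std::printf(\"read %zu weights: %g %g %g\\n\", weights.size(),\n                weights[0], weights[1], weights[2]);\n    return 0;\n}\n"
  },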
  {
    "path": "deepstream_plugin_yolov4/trt_utils.h",
    "content": "/*\n * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n\n#ifndef __TRT_UTILS_H__\n#define __TRT_UTILS_H__\n\n#include <set>\n#include <map>\n#include <string>\n#include <vector>\n#include <cassert>\n#include <iostream>\n#include <fstream>\n\n#include \"NvInfer.h\"\n\n#define UNUSED(expr) (void)(expr)\n// round-up integer division; fully parenthesized so the macro composes safely\n#define DIVUP(n, d) (((n) + (d) - 1) / (d))\n\nstd::string trim(std::string s);\nfloat clamp(const float val, const float minVal, const float maxVal);\nbool fileExists(const std::string fileName, bool verbose = true);\nstd::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType);\nstd::string dimsToString(const nvinfer1::Dims d);\nvoid displayDimType(const nvinfer1::Dims d);\nint getNumChannels(nvinfer1::ITensor* t);\nuint64_t get3DTensorVolume(nvinfer1::Dims inputDims);\n\n// Helper functions to create yolo engine\nnvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,\n                                nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);\nnvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,\n                                   std::vector<float>& weights,\n                                   std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,\n                                   int& inputChannels, nvinfer1::ITensor* input,\n                                   nvinfer1::INetworkDefinition* network);\nnvinfer1::ILayer* netAddConvBNLeaky(int layerIdx, std::map<std::string, std::string>& block,\n                                    std::vector<float>& weights,\n                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,\n                                    int& inputChannels, nvinfer1::ITensor* input,\n                                    nvinfer1::INetworkDefinition* network);\nnvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,\n                                 std::vector<float>& weights,\n                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,\n                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);\nvoid printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,\n                    std::string layerOutput, std::string weightPtr);\n\n#endif\n"
  },
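  {
    "path": "deepstream_plugin_yolov4/examples/divup_stride_sketch.cpp",
    "content": "// NOTE: illustrative sketch added as a hedged example; this file is not part of the\n// original plugin sources. It shows what the DIVUP macro from trt_utils.h computes\n// and why the stride asserts in the bbox parsers hold: for a 416x416 input, each\n// yolov3 output grid maps to an integer pixel stride of the network input.\n#include <cassert>\n#include <cstdio>\n\n#define DIVUP(n, d) (((n) + (d) - 1) / (d)) // round-up integer division\n\nint main()\n{\n    const unsigned netW = 416, netH = 416;\n    const unsigned grids[3] = {13, 26, 52}; // yolov3 grid sizes at 416x416\n    for (unsigned g : grids)\n    {\n        unsigned stride = DIVUP(netW, g);\n        assert(stride == DIVUP(netH, g)); // same check the parsers assert\n        std::printf(\"grid %2ux%-2u -> stride %u\\n\", g, g, stride);\n    }\n    return 0;\n}\n"
  },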
  {
    "path": "deepstream_plugin_yolov4/yolo.cpp",
    "content": "/*\n * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include \"yolo.h\"\n#include \"yoloPlugins.h\"\n\n#include <fstream>\n#include <iomanip>\n#include <iterator>\n\nYolo::Yolo(const NetworkInfo& networkInfo)\n    : m_NetworkType(networkInfo.networkType), // yolov3\n      m_ConfigFilePath(networkInfo.configFilePath), // yolov3.cfg\n      m_WtsFilePath(networkInfo.wtsFilePath), // yolov3.weights\n      m_DeviceType(networkInfo.deviceType), // kDLA, kGPU\n      m_InputBlobName(networkInfo.inputBlobName), // data\n      m_InputH(0),\n      m_InputW(0),\n      m_InputC(0),\n      m_InputSize(0)\n{}\n\nYolo::~Yolo()\n{\n    destroyNetworkUtils();\n}\n\nnvinfer1::ICudaEngine *Yolo::createEngine (nvinfer1::IBuilder* builder)\n{\n    assert (builder);\n\n    // parseModel() loads the weights itself; loading them here as well would\n    // read the whole .weights file a second time for nothing\n    nvinfer1::INetworkDefinition *network = builder->createNetwork();\n    if (parseModel(*network) != NVDSINFER_SUCCESS) {\n        network->destroy();\n        return nullptr;\n    }\n\n    // Build the engine\n    std::cout << \"Building the TensorRT Engine...\" << std::endl;\n    nvinfer1::ICudaEngine * engine = builder->buildCudaEngine(*network);\n    if (engine) {\n        std::cout << \"Building complete!\" << std::endl;\n    } else {\n        std::cerr << \"Building engine failed!\" << std::endl;\n    }\n\n    // destroy\n    network->destroy();\n    return engine;\n}\n\nNvDsInferStatus Yolo::parseModel(nvinfer1::INetworkDefinition& network) {\n    destroyNetworkUtils();\n\n    m_ConfigBlocks = parseConfigFile(m_ConfigFilePath);\n    parseConfigBlocks();\n\n    std::vector<float> weights = loadWeights(m_WtsFilePath, m_NetworkType);\n    // build yolo network\n    std::cout << \"Building Yolo network...\" << std::endl;\n    NvDsInferStatus status = buildYoloNetwork(weights, network);\n\n    if (status == NVDSINFER_SUCCESS) {\n        std::cout << \"Building yolo network complete!\" << std::endl;\n    } else {\n        std::cerr << \"Building yolo network failed!\" << std::endl;\n    }\n\n    return status;\n}\n\nNvDsInferStatus Yolo::buildYoloNetwork(\n    std::vector<float>& weights, nvinfer1::INetworkDefinition& network) {\n    int weightPtr = 0;\n    int channels = m_InputC;\n\n    nvinfer1::ITensor* data =\n        network.addInput(m_InputBlobName.c_str(), nvinfer1::DataType::kFLOAT,\n
     nvinfer1::DimsCHW{static_cast<int>(m_InputC),\n                static_cast<int>(m_InputH), static_cast<int>(m_InputW)});\n    assert(data != nullptr && data->getDimensions().nbDims > 0);\n\n    nvinfer1::ITensor* previous = data;\n    std::vector<nvinfer1::ITensor*> tensorOutputs;\n    uint outputTensorCount = 0;\n\n    // build the network using the network API\n    for (uint i = 0; i < m_ConfigBlocks.size(); ++i) {\n        // check if num. of channels is correct\n        assert(getNumChannels(previous) == channels);\n        std::string layerIndex = \"(\" + std::to_string(tensorOutputs.size()) + \")\";\n\n        if (m_ConfigBlocks.at(i).at(\"type\") == \"net\") {\n            printLayerInfo(\"\", \"layer\", \"     inp_size\", \"     out_size\", \"weightPtr\");\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"convolutional\") {\n            std::string inputVol = dimsToString(previous->getDimensions());\n            nvinfer1::ILayer* out;\n            std::string layerType;\n            // check if batch_norm enabled\n            if (m_ConfigBlocks.at(i).find(\"batch_normalize\") !=\n                m_ConfigBlocks.at(i).end()) {\n                out = netAddConvBNLeaky(i, m_ConfigBlocks.at(i), weights,\n                    m_TrtWeights, weightPtr, channels, previous, &network);\n                layerType = \"conv-bn-leaky\";\n            }\n            else\n            {\n                out = netAddConvLinear(i, m_ConfigBlocks.at(i), weights,\n                    m_TrtWeights, weightPtr, channels, previous, &network);\n                layerType = \"conv-linear\";\n            }\n            previous = out->getOutput(0);\n            assert(previous != nullptr);\n            channels = getNumChannels(previous);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            tensorOutputs.push_back(out->getOutput(0));\n            printLayerInfo(layerIndex, layerType, inputVol, outputVol, std::to_string(weightPtr));\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"shortcut\") {\n            assert(m_ConfigBlocks.at(i).at(\"activation\") == \"linear\");\n            assert(m_ConfigBlocks.at(i).find(\"from\") !=\n                   m_ConfigBlocks.at(i).end());\n            int from = stoi(m_ConfigBlocks.at(i).at(\"from\"));\n\n            std::string inputVol = dimsToString(previous->getDimensions());\n            // check if indexes are correct\n            assert((i - 2 >= 0) && (i - 2 < tensorOutputs.size()));\n            assert((i + from - 1 >= 0) && (i + from - 1 < tensorOutputs.size()));\n            assert(i + from - 1 < i - 2);\n            nvinfer1::IElementWiseLayer* ew = network.addElementWise(\n                *tensorOutputs[i - 2], *tensorOutputs[i + from - 1],\n                nvinfer1::ElementWiseOperation::kSUM);\n            assert(ew != nullptr);\n            std::string ewLayerName = \"shortcut_\" + std::to_string(i);\n            ew->setName(ewLayerName.c_str());\n            previous = ew->getOutput(0);\n            assert(previous != nullptr);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            tensorOutputs.push_back(ew->getOutput(0));\n            printLayerInfo(layerIndex, \"skip\", inputVol, outputVol, \"    -\");\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"yolo\") {\n            nvinfer1::Dims prevTensorDims = previous->getDimensions();\n            assert(prevTensorDims.d[1] == prevTensorDims.d[2]);\n            TensorInfo& curYoloTensor = 
m_OutputTensors.at(outputTensorCount);\n            curYoloTensor.gridSize = prevTensorDims.d[1];\n            curYoloTensor.stride = m_InputW / curYoloTensor.gridSize;\n            m_OutputTensors.at(outputTensorCount).volume = curYoloTensor.gridSize\n                * curYoloTensor.gridSize\n                * (curYoloTensor.numBBoxes * (5 + curYoloTensor.numClasses));\n            std::string layerName = \"yolo_\" + std::to_string(i);\n            curYoloTensor.blobName = layerName;\n            nvinfer1::IPluginV2* yoloPlugin\n                = new YoloLayerV3(m_OutputTensors.at(outputTensorCount).numBBoxes,\n                                  m_OutputTensors.at(outputTensorCount).numClasses,\n                                  m_OutputTensors.at(outputTensorCount).gridSize);\n            assert(yoloPlugin != nullptr);\n            nvinfer1::IPluginV2Layer* yolo =\n                network.addPluginV2(&previous, 1, *yoloPlugin);\n            assert(yolo != nullptr);\n            yolo->setName(layerName.c_str());\n            std::string inputVol = dimsToString(previous->getDimensions());\n            previous = yolo->getOutput(0);\n            assert(previous != nullptr);\n            previous->setName(layerName.c_str());\n            std::string outputVol = dimsToString(previous->getDimensions());\n            network.markOutput(*previous);\n            channels = getNumChannels(previous);\n            tensorOutputs.push_back(yolo->getOutput(0));\n            printLayerInfo(layerIndex, \"yolo\", inputVol, outputVol, std::to_string(weightPtr));\n            ++outputTensorCount;\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"region\") {\n            nvinfer1::Dims prevTensorDims = previous->getDimensions();\n            assert(prevTensorDims.d[1] == prevTensorDims.d[2]);\n            TensorInfo& curRegionTensor = m_OutputTensors.at(outputTensorCount);\n            curRegionTensor.gridSize = prevTensorDims.d[1];\n            curRegionTensor.stride = m_InputW / curRegionTensor.gridSize;\n            m_OutputTensors.at(outputTensorCount).volume = curRegionTensor.gridSize\n                * curRegionTensor.gridSize\n                * (curRegionTensor.numBBoxes * (5 + curRegionTensor.numClasses));\n            std::string layerName = \"region_\" + std::to_string(i);\n            curRegionTensor.blobName = layerName;\n            nvinfer1::plugin::RegionParameters RegionParameters{\n                static_cast<int>(curRegionTensor.numBBoxes), 4,\n                static_cast<int>(curRegionTensor.numClasses), nullptr};\n            std::string inputVol = dimsToString(previous->getDimensions());\n            nvinfer1::IPluginV2* regionPlugin\n                = createRegionPlugin(RegionParameters);\n            assert(regionPlugin != nullptr);\n            nvinfer1::IPluginV2Layer* region =\n                network.addPluginV2(&previous, 1, *regionPlugin);\n            assert(region != nullptr);\n            region->setName(layerName.c_str());\n            previous = region->getOutput(0);\n            assert(previous != nullptr);\n            previous->setName(layerName.c_str());\n            std::string outputVol = dimsToString(previous->getDimensions());\n            network.markOutput(*previous);\n            channels = getNumChannels(previous);\n            tensorOutputs.push_back(region->getOutput(0));\n            printLayerInfo(layerIndex, \"region\", inputVol, outputVol, std::to_string(weightPtr));\n            std::cout << \"Anchors are being converted to network input resolution 
i.e. Anchors x \"\n                      << curRegionTensor.stride << \" (stride)\" << std::endl;\n            for (auto& anchor : curRegionTensor.anchors) anchor *= curRegionTensor.stride;\n            ++outputTensorCount;\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"reorg\") {\n            std::string inputVol = dimsToString(previous->getDimensions());\n            nvinfer1::IPluginV2* reorgPlugin = createReorgPlugin(2);\n            assert(reorgPlugin != nullptr);\n            nvinfer1::IPluginV2Layer* reorg =\n                network.addPluginV2(&previous, 1, *reorgPlugin);\n            assert(reorg != nullptr);\n\n            std::string layerName = \"reorg_\" + std::to_string(i);\n            reorg->setName(layerName.c_str());\n            previous = reorg->getOutput(0);\n            assert(previous != nullptr);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            channels = getNumChannels(previous);\n            tensorOutputs.push_back(reorg->getOutput(0));\n            printLayerInfo(layerIndex, \"reorg\", inputVol, outputVol, std::to_string(weightPtr));\n        }\n        // route layers (single or concat)\n        else if (m_ConfigBlocks.at(i).at(\"type\") == \"route\") {\n            std::string strLayers = m_ConfigBlocks.at(i).at(\"layers\");\n            std::vector<int> idxLayers;\n            size_t lastPos = 0, pos = 0;\n            while ((pos = strLayers.find(',', lastPos)) != std::string::npos) {\n                int vL = std::stoi(trim(strLayers.substr(lastPos, pos - lastPos)));\n                idxLayers.push_back (vL);\n                lastPos = pos + 1;\n            }\n            if (lastPos < strLayers.length()) {\n                std::string lastV = trim(strLayers.substr(lastPos));\n                if (!lastV.empty()) {\n                    idxLayers.push_back (std::stoi(lastV));\n                }\n            }\n            assert (!idxLayers.empty());\n            std::vector<nvinfer1::ITensor*> concatInputs;\n            for (int idxLayer : idxLayers) {\n                if (idxLayer < 0) {\n                    idxLayer = tensorOutputs.size() + idxLayer;\n                }\n                assert (idxLayer >= 0 && idxLayer < (int)tensorOutputs.size());\n                concatInputs.push_back (tensorOutputs[idxLayer]);\n            }\n            nvinfer1::IConcatenationLayer* concat =\n                network.addConcatenation(concatInputs.data(), concatInputs.size());\n            assert(concat != nullptr);\n            std::string concatLayerName = \"route_\" + std::to_string(i - 1);\n            concat->setName(concatLayerName.c_str());\n            // concatenate along the channel dimension\n            concat->setAxis(0);\n            previous = concat->getOutput(0);\n            assert(previous != nullptr);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            // set the output volume depth\n            channels\n                = getNumChannels(previous);\n            tensorOutputs.push_back(concat->getOutput(0));\n            printLayerInfo(layerIndex, \"route\", \"        -\", outputVol,\n                           std::to_string(weightPtr));\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"upsample\") {\n            std::string inputVol = dimsToString(previous->getDimensions());\n            nvinfer1::ILayer* out = netAddUpsample(i - 1, m_ConfigBlocks[i],\n                weights, m_TrtWeights, channels, previous, &network);\n            previous = 
out->getOutput(0);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            tensorOutputs.push_back(out->getOutput(0));\n            printLayerInfo(layerIndex, \"upsample\", inputVol, outputVol, \"    -\");\n        } else if (m_ConfigBlocks.at(i).at(\"type\") == \"maxpool\") {\n            std::string inputVol = dimsToString(previous->getDimensions());\n            nvinfer1::ILayer* out =\n                netAddMaxpool(i, m_ConfigBlocks.at(i), previous, &network);\n            previous = out->getOutput(0);\n            assert(previous != nullptr);\n            std::string outputVol = dimsToString(previous->getDimensions());\n            tensorOutputs.push_back(out->getOutput(0));\n            printLayerInfo(layerIndex, \"maxpool\", inputVol, outputVol, std::to_string(weightPtr));\n        }\n        else\n        {\n            std::cout << \"Unsupported layer type --> \\\"\"\n                      << m_ConfigBlocks.at(i).at(\"type\") << \"\\\"\" << std::endl;\n            assert(0);\n        }\n    }\n\n    if ((int)weights.size() != weightPtr)\n    {\n        std::cout << \"Number of unused weights left : \" << weights.size() - weightPtr << std::endl;\n        assert(0);\n    }\n\n    std::cout << \"Output yolo blob names :\" << std::endl;\n    for (auto& tensor : m_OutputTensors) {\n        std::cout << tensor.blobName << std::endl;\n    }\n\n    int nbLayers = network.getNbLayers();\n    std::cout << \"Total number of yolo layers: \" << nbLayers << std::endl;\n\n    return NVDSINFER_SUCCESS;\n}\n\nstd::vector<std::map<std::string, std::string>>\nYolo::parseConfigFile (const std::string cfgFilePath)\n{\n    assert(fileExists(cfgFilePath));\n    std::ifstream file(cfgFilePath);\n    assert(file.good());\n    std::string line;\n    std::vector<std::map<std::string, std::string>> blocks;\n    std::map<std::string, std::string> block;\n\n    while (getline(file, line))\n    {\n        if (line.size() == 0) continue;\n        if (line.front() == '#') continue;\n        line = trim(line);\n        if (line.front() == '[')\n        {\n            if (block.size() > 0)\n            {\n                blocks.push_back(block);\n                block.clear();\n            }\n            std::string key = \"type\";\n            std::string value = trim(line.substr(1, line.size() - 2));\n            block.insert(std::pair<std::string, std::string>(key, value));\n        }\n        else\n        {\n            int cpos = line.find('=');\n            std::string key = trim(line.substr(0, cpos));\n            std::string value = trim(line.substr(cpos + 1));\n            block.insert(std::pair<std::string, std::string>(key, value));\n        }\n    }\n    blocks.push_back(block);\n    return blocks;\n}\n\nvoid Yolo::parseConfigBlocks()\n{\n    for (auto block : m_ConfigBlocks) {\n        if (block.at(\"type\") == \"net\")\n        {\n            assert((block.find(\"height\") != block.end())\n                   && \"Missing 'height' param in network cfg\");\n            assert((block.find(\"width\") != block.end()) && \"Missing 'width' param in network cfg\");\n            assert((block.find(\"channels\") != block.end())\n                   && \"Missing 'channels' param in network cfg\");\n\n            m_InputH = std::stoul(block.at(\"height\"));\n            m_InputW = std::stoul(block.at(\"width\"));\n            m_InputC = std::stoul(block.at(\"channels\"));\n            assert(m_InputW == m_InputH);\n            m_InputSize = m_InputC * m_InputH * m_InputW;\n      
  }\n        else if ((block.at(\"type\") == \"region\") || (block.at(\"type\") == \"yolo\"))\n        {\n            assert((block.find(\"num\") != block.end())\n                   && std::string(\"Missing 'num' param in \" + block.at(\"type\") + \" layer\").c_str());\n            assert((block.find(\"classes\") != block.end())\n                   && std::string(\"Missing 'classes' param in \" + block.at(\"type\") + \" layer\")\n                          .c_str());\n            assert((block.find(\"anchors\") != block.end())\n                   && std::string(\"Missing 'anchors' param in \" + block.at(\"type\") + \" layer\")\n                          .c_str());\n\n            TensorInfo outputTensor;\n            std::string anchorString = block.at(\"anchors\");\n            while (!anchorString.empty())\n            {\n                int npos = anchorString.find_first_of(',');\n                if (npos != -1)\n                {\n                    float anchor = std::stof(trim(anchorString.substr(0, npos)));\n                    outputTensor.anchors.push_back(anchor);\n                    anchorString.erase(0, npos + 1);\n                }\n                else\n                {\n                    float anchor = std::stof(trim(anchorString));\n                    outputTensor.anchors.push_back(anchor);\n                    break;\n                }\n            }\n\n            if ((m_NetworkType == \"yolov3\") || (m_NetworkType == \"yolov3-tiny\"))\n            {\n                assert((block.find(\"mask\") != block.end())\n                       && std::string(\"Missing 'mask' param in \" + block.at(\"type\") + \" layer\")\n                              .c_str());\n\n                std::string maskString = block.at(\"mask\");\n                while (!maskString.empty())\n                {\n                    int npos = maskString.find_first_of(',');\n                    if (npos != -1)\n                    {\n                        uint mask = std::stoul(trim(maskString.substr(0, npos)));\n                        outputTensor.masks.push_back(mask);\n                        maskString.erase(0, npos + 1);\n                    }\n                    else\n                    {\n                        uint mask = std::stoul(trim(maskString));\n                        outputTensor.masks.push_back(mask);\n                        break;\n                    }\n                }\n            }\n\n            outputTensor.numBBoxes = outputTensor.masks.size() > 0\n                ? outputTensor.masks.size()\n                : std::stoul(trim(block.at(\"num\")));\n            outputTensor.numClasses = std::stoul(block.at(\"classes\"));\n            m_OutputTensors.push_back(outputTensor);\n        }\n    }\n}\n\nvoid Yolo::destroyNetworkUtils() {\n    // deallocate the weight buffers; they were allocated with new float[] in\n    // trt_utils.cpp, so release them with delete[], not free()\n    for (uint i = 0; i < m_TrtWeights.size(); ++i) {\n        if (m_TrtWeights[i].count > 0)\n            delete[] static_cast<const float*>(m_TrtWeights[i].values);\n    }\n    m_TrtWeights.clear();\n}\n\n"
  },
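  {
    "path": "deepstream_plugin_yolov4/examples/cfg_blocks_sketch.cpp",
    "content": "// NOTE: illustrative sketch added as a hedged example; this file is not part of the\n// original plugin sources. It mirrors how Yolo::parseConfigFile in yolo.cpp turns a\n// darknet cfg into a list of key/value blocks: every [section] opens a new block map\n// with a synthetic \"type\" key, and each key=value line is added to the current block\n// (whitespace trimming is omitted here for brevity).\n#include <iostream>\n#include <map>\n#include <sstream>\n#include <string>\n#include <vector>\n\nint main()\n{\n    std::istringstream cfg(\n        \"[net]\\n\"\n        \"height=416\\n\"\n        \"width=416\\n\"\n        \"channels=3\\n\"\n        \"[convolutional]\\n\"\n        \"filters=32\\n\");\n\n    std::vector<std::map<std::string, std::string>> blocks;\n    std::map<std::string, std::string> block;\n    std::string line;\n    while (std::getline(cfg, line))\n    {\n        if (line.empty() || line.front() == '#') continue;\n        if (line.front() == '[')\n        {\n            if (!block.empty()) { blocks.push_back(block); block.clear(); }\n            block[\"type\"] = line.substr(1, line.size() - 2);\n        }\n        else\n        {\n            size_t eq = line.find('=');\n            block[line.substr(0, eq)] = line.substr(eq + 1);\n        }\n    }\n    blocks.push_back(block);\n\n    for (const auto& b : blocks)\n        std::cout << \"block type=\" << b.at(\"type\") << \" (\"\n                  << b.size() - 1 << \" params)\" << std::endl;\n    return 0;\n}\n"
  },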
  {
    "path": "deepstream_plugin_yolov4/yolo.h",
    "content": "/*\n * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#ifndef _YOLO_H_\n#define _YOLO_H_\n\n#include <stdint.h>\n#include <string>\n#include <vector>\n#include <memory>\n\n#include \"NvInfer.h\"\n#include \"trt_utils.h\"\n\n#include \"nvdsinfer_custom_impl.h\"\n\n/**\n * Holds all the file paths required to build a network.\n */\nstruct NetworkInfo\n{\n    std::string networkType;\n    std::string configFilePath;\n    std::string wtsFilePath;\n    std::string deviceType;\n    std::string inputBlobName;\n};\n\n/**\n * Holds information about an output tensor of the yolo network.\n */\nstruct TensorInfo\n{\n    std::string blobName;\n    uint stride{0};\n    uint gridSize{0};\n    uint numClasses{0};\n    uint numBBoxes{0};\n    uint64_t volume{0};\n    std::vector<uint> masks;\n    std::vector<float> anchors;\n    int bindingIndex{-1};\n    float* hostBuffer{nullptr};\n};\n\nclass Yolo : public IModelParser {\npublic:\n    Yolo(const NetworkInfo& networkInfo);\n    ~Yolo() override;\n    bool hasFullDimsSupported() const override { return false; }\n    const char* getModelName() const override {\n        return m_ConfigFilePath.empty() ? m_NetworkType.c_str()\n                                        : m_ConfigFilePath.c_str();\n    }\n    NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;\n\n    nvinfer1::ICudaEngine *createEngine (nvinfer1::IBuilder* builder);\n\nprotected:\n    const std::string m_NetworkType;\n    const std::string m_ConfigFilePath;\n    const std::string m_WtsFilePath;\n    const std::string m_DeviceType;\n    const std::string m_InputBlobName;\n    std::vector<TensorInfo> m_OutputTensors;\n    std::vector<std::map<std::string, std::string>> m_ConfigBlocks;\n    uint m_InputH;\n    uint m_InputW;\n    uint m_InputC;\n    uint64_t m_InputSize;\n\n    // TRT specific members\n    std::vector<nvinfer1::Weights> m_TrtWeights;\n\nprivate:\n    NvDsInferStatus buildYoloNetwork(\n        std::vector<float>& weights, nvinfer1::INetworkDefinition& network);\n    std::vector<std::map<std::string, std::string>> parseConfigFile(\n        const std::string cfgFilePath);\n    void parseConfigBlocks();\n    void destroyNetworkUtils();\n};\n\n#endif // _YOLO_H_\n"
  },
  {
    "path": "deepstream_plugin_yolov4/yoloPlugins.cpp",
    "content": "/*\n * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#include \"yoloPlugins.h\"\n#include \"NvInferPlugin.h\"\n#include <cassert>\n#include <iostream>\n#include <memory>\n\nnamespace {\ntemplate <typename T>\nvoid write(char*& buffer, const T& val)\n{\n    *reinterpret_cast<T*>(buffer) = val;\n    buffer += sizeof(T);\n}\n\ntemplate <typename T>\nvoid read(const char*& buffer, T& val)\n{\n    val = *reinterpret_cast<const T*>(buffer);\n    buffer += sizeof(T);\n}\n} //namespace\n\n// Forward declaration of cuda kernels\ncudaError_t cudaYoloLayerV3 (\n    const void* input, void* output, const uint& batchSize,\n    const uint& gridSize, const uint& numOutputClasses,\n    const uint& numBBoxes, uint64_t outputSize, cudaStream_t stream);\n\nYoloLayerV3::YoloLayerV3 (const void* data, size_t length)\n{\n    const char *d = static_cast<const char*>(data);\n    read(d, m_NumBoxes);\n    read(d, m_NumClasses);\n    read(d, m_GridSize);\n    read(d, m_OutputSize);\n};\n\nYoloLayerV3::YoloLayerV3 (\n    const uint& numBoxes, const uint& numClasses, const uint& gridSize) :\n    m_NumBoxes(numBoxes),\n    m_NumClasses(numClasses),\n    m_GridSize(gridSize)\n{\n    assert(m_NumBoxes > 0);\n    assert(m_NumClasses > 0);\n    assert(m_GridSize > 0);\n    m_OutputSize = m_GridSize * m_GridSize * (m_NumBoxes * (4 + 1 + m_NumClasses));\n};\n\nnvinfer1::Dims\nYoloLayerV3::getOutputDimensions(\n    int index, const nvinfer1::Dims* inputs, int nbInputDims)\n{\n    assert(index == 0);\n    assert(nbInputDims == 1);\n    return inputs[0];\n}\n\nbool YoloLayerV3::supportsFormat (\n    nvinfer1::DataType type, nvinfer1::PluginFormat format) const {\n    return (type == nvinfer1::DataType::kFLOAT &&\n            format == nvinfer1::PluginFormat::kNCHW);\n}\n\nvoid\nYoloLayerV3::configureWithFormat (\n    const nvinfer1::Dims* inputDims, int nbInputs,\n    const nvinfer1::Dims* outputDims, int nbOutputs,\n    nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize)\n{\n    assert(nbInputs == 1);\n    assert (format == nvinfer1::PluginFormat::kNCHW);\n    assert(inputDims != nullptr);\n}\n\nint YoloLayerV3::enqueue(\n    int batchSize, const void* const* inputs, void** outputs, void* workspace,\n    cudaStream_t stream)\n{\n    CHECK(cudaYoloLayerV3(\n              inputs[0], outputs[0], batchSize, m_GridSize, m_NumClasses, m_NumBoxes,\n              m_OutputSize, stream));\n    
return 0;\n}\n\nsize_t YoloLayerV3::getSerializationSize() const\n{\n    return sizeof(m_NumBoxes) + sizeof(m_NumClasses) + sizeof(m_GridSize) + sizeof(m_OutputSize);\n}\n\nvoid YoloLayerV3::serialize(void* buffer) const\n{\n    char *d = static_cast<char*>(buffer);\n    write(d, m_NumBoxes);\n    write(d, m_NumClasses);\n    write(d, m_GridSize);\n    write(d, m_OutputSize);\n}\n\nnvinfer1::IPluginV2* YoloLayerV3::clone() const\n{\n    return new YoloLayerV3 (m_NumBoxes, m_NumClasses, m_GridSize);\n}\n\nREGISTER_TENSORRT_PLUGIN(YoloLayerV3PluginCreator);\n"
  },
  {
    "path": "deepstream_plugin_yolov4/yoloPlugins.h",
    "content": "/*\n * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.\n *\n * Permission is hereby granted, free of charge, to any person obtaining a\n * copy of this software and associated documentation files (the \"Software\"),\n * to deal in the Software without restriction, including without limitation\n * the rights to use, copy, modify, merge, publish, distribute, sublicense,\n * and/or sell copies of the Software, and to permit persons to whom the\n * Software is furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n * DEALINGS IN THE SOFTWARE.\n */\n\n#ifndef __YOLO_PLUGINS__\n#define __YOLO_PLUGINS__\n\n#include <cassert>\n#include <cstring>\n#include <cuda_runtime_api.h>\n#include <iostream>\n#include <memory>\n\n#include \"NvInferPlugin.h\"\n\n#define CHECK(status)                                                                              \\\n    {                                                                                              \\\n        if (status != 0)                                                                           \\\n        {                                                                                          \\\n            std::cout << \"Cuda failure: \" << cudaGetErrorString(status) << \" in file \" << __FILE__ \\\n                      << \" at line \" << __LINE__ << std::endl;                                     \\\n            abort();                                                                               \\\n        }                                                                                          \\\n    }\n\nnamespace\n{\nconst char* YOLOV3LAYER_PLUGIN_VERSION {\"1\"};\nconst char* YOLOV3LAYER_PLUGIN_NAME {\"YoloLayerV3_TRT\"};\n} // namespace\n\nclass YoloLayerV3 : public nvinfer1::IPluginV2\n{\npublic:\n    YoloLayerV3 (const void* data, size_t length);\n    YoloLayerV3 (const uint& numBoxes, const uint& numClasses, const uint& gridSize);\n    const char* getPluginType () const override { return YOLOV3LAYER_PLUGIN_NAME; }\n    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }\n    int getNbOutputs () const override { return 1; }\n\n    nvinfer1::Dims getOutputDimensions (\n        int index, const nvinfer1::Dims* inputs,\n        int nbInputDims) override;\n\n    bool supportsFormat (\n        nvinfer1::DataType type, nvinfer1::PluginFormat format) const override;\n\n    void configureWithFormat (\n        const nvinfer1::Dims* inputDims, int nbInputs,\n        const nvinfer1::Dims* outputDims, int nbOutputs,\n        nvinfer1::DataType type, nvinfer1::PluginFormat format, int maxBatchSize) override;\n\n    int initialize () override { return 0; }\n    void terminate () override {}\n    size_t getWorkspaceSize (int maxBatchSize) const override { return 0; }\n    int enqueue (\n        int batchSize, const void* const* inputs, void** outputs,\n        void* workspace, 
cudaStream_t stream) override;\n    size_t getSerializationSize() const override;\n    void serialize (void* buffer) const override;\n    void destroy () override { delete this; }\n    nvinfer1::IPluginV2* clone() const override;\n\n    void setPluginNamespace (const char* pluginNamespace) override {\n        m_Namespace = pluginNamespace;\n    }\n    const char* getPluginNamespace () const override {\n        return m_Namespace.c_str();\n    }\n\nprivate:\n    uint m_NumBoxes {0};\n    uint m_NumClasses {0};\n    uint m_GridSize {0};\n    uint64_t m_OutputSize {0};\n    std::string m_Namespace {\"\"};\n};\n\nclass YoloLayerV3PluginCreator : public nvinfer1::IPluginCreator\n{\npublic:\n    YoloLayerV3PluginCreator () {}\n    ~YoloLayerV3PluginCreator () {}\n\n    const char* getPluginName () const override { return YOLOV3LAYER_PLUGIN_NAME; }\n    const char* getPluginVersion () const override { return YOLOV3LAYER_PLUGIN_VERSION; }\n\n    const nvinfer1::PluginFieldCollection* getFieldNames() override {\n        std::cerr << \"YoloLayerV3PluginCreator::getFieldNames is not implemented\" << std::endl;\n        return nullptr;\n    }\n\n    nvinfer1::IPluginV2* createPlugin (\n        const char* name, const nvinfer1::PluginFieldCollection* fc) override\n    {\n        std::cerr << \"YoloLayerV3PluginCreator::createPlugin is not implemented.\\n\";\n        return nullptr;\n    }\n\n    nvinfer1::IPluginV2* deserializePlugin (\n        const char* name, const void* serialData, size_t serialLength) override\n    {\n        std::cout << \"Deserialize yoloLayerV3 plugin: \" << name << std::endl;\n        return new YoloLayerV3(serialData, serialLength);\n    }\n\n    void setPluginNamespace(const char* libNamespace) override {\n        m_Namespace = libNamespace;\n    }\n    const char* getPluginNamespace() const override {\n        return m_Namespace.c_str();\n    }\n\nprivate:\n    std::string m_Namespace {\"\"};\n};\n\n#endif // __YOLO_PLUGINS__\n"
  },
  {
    "path": "docker/constraints.docker",
    "content": "scikit-learn==0.19.1\nopencv-python==3.2.0"
  },
  {
    "path": "docker/opencv_python-3.2.0.egg-info",
    "content": "Metadata-Version: 1.1\nName: opencv-python\nVersion: 3.2.0\nSummary: stub file to make pip aware of apt installed opencv\nHome-page: UNKNOWN\nAuthor: UNKNOWN\nAuthor-email: UNKNOWN\nLicense: UNKNOWN\nDescription: UNKNOWN\nPlatform: UNKNOWN\nClassifier: Programming Language :: Python :: 3"
  },
  {
    "path": "docker/scikit-learn-0.19.1.egg-info",
    "content": "Metadata-Version: 1.1\nName: scikit-learn\nVersion: 0.19.1\nSummary: stub file to make pip aware of apt installed package\nHome-page: UNKNOWN\nAuthor: UNKNOWN\nAuthor-email: UNKNOWN\nLicense: UNKNOWN\nDescription: UNKNOWN\nPlatform: UNKNOWN\nClassifier: Programming Language :: Python :: 3"
  },
  {
    "path": "docker/start.sh",
    "content": "#!/usr/bin/env bash\nnvargus-daemon &\n\necho\necho \"Provided input:\"\necho \" - MASKCAM_INPUT = $MASKCAM_INPUT\"\necho \"Device Address:\"\necho \" - MASKCAM_DEVICE_ADDRESS = $MASKCAM_DEVICE_ADDRESS\"\necho \"Development mode:\"\necho \" - DEV_MODE = $DEV_MODE\"\necho\necho \"MQTT configuration:\"\necho \" - MQTT_BROKER_IP = $MQTT_BROKER_IP\"\necho \" - MQTT_DEVICE_NAME = $MQTT_DEVICE_NAME\"\necho\n\nif [[ $DEV_MODE -eq 1 ]]; then\n    echo \"Development mode enabled, exec maskcam_run.py manually\"\n    /bin/bash\nelse\n    ./maskcam_run.py $MASKCAM_INPUT\nfi\n"
  },
  {
    "path": "docker-compose.yml",
    "content": "version: '2'\n\n# this file only exists for local development\n# pushes to override the restart function\n\nservices:\n  mc_maskcam:\n    restart: no\n    build: .\n    privileged: true\n    ports:\n      - \"1883:1883\"\n      - \"8080:8080\"\n      - \"8554:8554\"\n"
  },
  {
    "path": "docs/BalenaOS-DevKit-Nano-Setup.md",
    "content": "# BalenaOS Developer Kit Nano Setup Instructions for MaskCam\n\nBalenaOS is a very light weight distribution designed for running containers on edge devices. It has a number of advantages for fleet deployment and management, especially when combined with balena's balenaCloud mangament system. Explaining the details of how to set up balenaCloud applications is beyond the scope of this document, but you can test MaskCam on balenaOS using a local development environment setup.\nExcept for installing balenaOS and using a slightly modified launch command, this process is essentially the same as the Jetson Nano Development kit instructions from [the main README](https://github.com/bdtinc/maskcam#running-maskcam-from-a-container-on-a-jetson-nano-developer-kit).\n\nIf you want to use balenaCloud instead (i.e: see your device in the web dashboard), and you're willing to take some time to push the container to your own account, check [Using balenaCloud](#using-balenacloud) at the end of this section.\n\nIn any case, this will require a Jetson Nano Development Kit, a 32 GB or higher Micro-SD card, and another computer (referred to here as main system) on the same network.\n\n### Installing balenaOS\nAs mentioned, this procedure will not link your device with a balenaCloud account, but instead it will enable local development.\n\nFirst, go to https://www.balena.io/os/, scroll to the Download section, and download the development version for Nvidia Jetson Nano SD-CARD.\n\nNext, go to https://www.balena.io/etcher/ and install balenaEtcher.\n\nIn balenaEtcher, simply select the zip file you downloaded, and after inserting the sd card into your main system select it, then press the 'Flash!' icon.\n\nAfter the flashing process is completed, place the sd card into your Jetson Nano Development Kit, ensure the network cable is plugged into the device and power up the Jetson.\n\n### Installing balena CLI\n\nUse [these instructions](https://github.com/balena-io/balena-cli/blob/master/INSTALL.md) to install the balena CLI tool.\n\n### Connecting to your Jetson\n\nFirst, in a terminal on your main system run the command:\n```\nsudo balena scan\n```\nNote the ip address in the result.\n\nNext connect to your Jetson:\n```\nbalena ssh <device ip>\n```\n\nAt this point you are in a console as root user on your Jetson running balenaOS. The commands from this point on are exactly the same as the instructions for running using JetPack on the Nano Developer Kit with the following differences.\n1. The `docker` command is replaced by `balena`\n2. Do not use the `--runtime nvidia` switch. 
It is enabled automatically on balenaOS for Jetson, and you will get errors if you include it.\n\nIssuing the following commands will then run MaskCam:\n```\n$ balena pull maskcam/maskcam-beta\n\n$ balena run --privileged --rm -it --env MASKCAM_DEVICE_ADDRESS=<device ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\nNote that setting `MASKCAM_DEVICE_ADDRESS` is optional, and you can also set other configuration parameters exactly as indicated in the [device configuration](https://github.com/bdtinc/maskcam#setting-device-configuration-parameters) section of the main docs.\n\n### Using balenaCloud\nYou can create a free balenaCloud account that will allow you to link up to 10 devices, in order to test some of the most useful features that this platform provides.\nYou'll need to create an App, install the balena CLI, and then follow these instructions to deploy the maskcam container to your app:\n\nhttps://www.balena.io/docs/learn/deploy/deployment/\n\nFor a simple use case, you can just use the `balena push myApp` command from the root directory of this project (it will take a long time, since it builds and pushes the whole image), but you should familiarize yourself with the platform and use the deployment method that best fits your needs.\n"
  },
  {
    "path": "docs/BalenaOS-Photon-Nano-Setup.md",
    "content": "# BalenaOS Setup Instructions for MaskCam on Jetson Nano with Photon Carrier Board\n\nBalenaOS is a very light weight distribution designed for running containers on edge devices. It has a number of advantages for fleet deployment and management, especially when combined with balena's balenaCloud mangament system. Explaining the details of how to set up balenaCloud applications is beyond the scope of this document, but you can test MaskCam on balenaOS using a local development environment setup.\n\nWhen using a Jetson Nano with a Photon carrier board (i.e. a \"Photon Nano\"), the process for installing balenaOS is different than with a Developer Kit. The production Jetson Nano module does not have an SD card slot, so balenaOS has to be directly flashed onto the device over USB, rather than using balenaEtcher. Fortunately, balena has created a flashing tool called [jetson-flash](https://github.com/balena-os/jetson-flash) that allows you to do this.\n\n\n### Setting up jetson-flash\nTo flash the balenaOS image onto the Photon Nano, we need to use Balena's jetson-flash tool. The instructions here show how to install and use jetson-flash on an Ubuntu v18.04 PC. The tool also requires NodeJS >= v10, which can be installed on Ubuntu using [these installation instructions](https://github.com/nodesource/distributions/blob/master/README.md#installation-instructions).\n\nFirst, clone the jetson-flash repository using:\n\n```\ngit clone https://github.com/balena-os/jetson-flash.git\n```\n\nNext, go to the [balenaOS download page](https://www.balena.io/os/#download) and download the CTI Photon Nano Development image. Unzip the image, and move it to the jetson-flash directory.\n\nThen, from inside the `jetson-flash` directory, issue the following command to download the NodeJS package dependencies.\n\n```\nnpm install\n```\n\nNow jetson-flash is ready to be used to flash the OS image onto the Photon Nano.\n\n### Flashing balenaOS onto the Jetson Nano over USB\nBefore flashing the OS, the Photon Nano has to be powered on and put into Force Recovery as shown in the [Photon manual](https://connecttech.com/ftp/pdf/CTIM_NGX002_Manual.pdf). Starting with a Photon carrier board that has a Jetson Nano module installed, plug in 12V power to the barrel jack on the carrier board. Then, press and hold SW2 for at least 10 seconds.\n\nPlug a micro-USB cord from the Ubuntu PC to P13 on the bottom side of the Photon carrier board. Verify the board is in Force Recovery mode by issuing `lsusb` and checking that an Nvidia device is listed. If it isn't, try re-connecting the USB cable and repeating the process to put the board in Force Recovery mode.\n\nBegin flashing by issuing the following command, where `balena.img` is replaced with the filename for the image that was downloaded and extracted previously.\n\n```\nsudo ./bin/cmd.js -f balena.img -m jetson-nano-emmc\n```\n\nThis will initiate the flashing process, which takes about 10 minutes. Once it's complete, unplug the micro-USB cable and power cycle the Photon carrier board. Plug an Ethernet cable into the Photon to connect it to a local network.\n\n### Installing balena CLI\n\nUse [these instructions](https://github.com/balena-io/balena-cli/blob/master/INSTALL.md) to install the balena CLI tool on your Ubuntu PC.\n\n### Connecting to your Jetson\n\nOn the Ubuntu PC, open a terminal and run:\n```\nsudo balena scan\n```\n\nThis will report the IP address of your Photon Nano. 
Use this IP address with the following command to connect to your Jetson:\n\n```\nbalena ssh <device ip>\n```\n\nAt this point you are in a console as the root user on your Jetson running balenaOS. Now, we just need to download the Docker container and run it! On balenaOS, the `docker` command is replaced by `balena`. Issue the two commands below to download and run MaskCam:\n\n```\n$ balena pull maskcam/maskcam-beta\n$ balena run --privileged --rm -it --env MASKCAM_DEVICE_ADDRESS=<device ip> -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam/maskcam-beta\n```\n\nNote that setting `MASKCAM_DEVICE_ADDRESS` is optional, and you can also set other configuration parameters exactly as indicated in the [device configuration](https://github.com/bdtinc/maskcam#setting-device-configuration-parameters) section of the main docs.\n\n### Using balenaCloud\nYou can create a free balenaCloud account that will allow you to link up to 10 devices, in order to test some of the most useful features that this platform provides.\nYou'll need to create an App, install the balena CLI, and then follow these instructions to deploy the maskcam container to your app:\n\nhttps://www.balena.io/docs/learn/deploy/deployment/\n\nFor a simple use case, you can just use the `balena push myApp` command from the root directory of this project (it will take a long time, since it builds and pushes the whole image), but you should familiarize yourself with the platform and use the deployment method that best fits your needs.\n"
  },
  {
    "path": "docs/Custom-Container-Development.md",
    "content": "# Custom Container Development\nThe MaskCam code in this repository can be used as a starting point for developing your own smart camera application. If you'd like to develop a custom application (for example, a dog detector that counts how many dogs walk past your house and reports the count to a server), you can build your own container that has the custom code, files, and packages used for your unique application. This page gives instructions on how to build a custom container, rather than downloading our pre-built container from Docker. \n\nThis page is split in to two sections:\n- [How to Build Your Own Container from Source on the Jetson Nano](#how-to-build-your-own-container-from-source-on-the-jetson-nano)\n- [How to Use Your Own Detection Model](#how-to-use-your-own-detection-model)\n\n## How to Build Your Own Container from Source on the Jetson Nano\nThe easiest way to get Maskcam running or set up for development purposes, is by using a container like the one provided in the main [Dockerfile](Dockerfile), which provides the right versions of the OS (Ubuntu 18.04 / Bionic Beaver) and all the system level packages required (mainly NVIDIA L4T packages, GStreamer and DeepStream among others).\n\nFor development, you could make modifications to the code or the container definition, and then rebuild locally using:\n```\ndocker build . -t maskcam_custom\n```\n\nThe above building step could be executed in the target Jetson Nano device (easier), or in another development environment (i.e: pushing the result to [Docker Hub](https://hub.docker.com/) and then pulling from device).\n\nEither way, once the image is ready on the device, remember to run the container using the `--runtime nvidia` and `--privileged` flags (to access the camera device), and mapping the used ports (MQTT -1883-, static file serving -8080- and streaming -8554-, as defined in [maskcam_config.txt](maskcam_config.txt)):\n```\ndocker run --runtime nvidia --privileged --rm -it -p 1883:1883 -p 8080:8080 -p 8554:8554 maskcam_custom\n```\n\nIf you still want to better understand some of the [Dockerfile](Dockerfile) steps, or you need to run without a container and are willing to deal with version conflicts, please see the dependencies manual installation and building instructions at [docs/Manual-Dependency-Installation.md](docs/Manual-Dependencies-Installation.md)\n\n## How to Use Your Own Detection Model\nAs mentioned above, MaskCam is a reference design for smart camera applications that need to perform computer vision tasks on the edge. Specifically, those involving **Object Detection** (for which you'll need a TensorRT engine) and **Tracking** (for which we use [Norfair](https://github.com/tryolabs/norfair)).\n\nDepending on the degree of similarity with this particular use case, you might need to just change the configuration file or some parts of the source code.\n\n### Changing the DeepStream model\nIf you train a new model that is compatible with DeepStream, and has exactly the same (or a subset of the) object classes that are used in this project (`mask`, `no_mask`, `not_visible`, `misplaced`), then you only need to edit the configuration file.\n\nIn particular, you should change only the corresponding parts of the [maskcam_config.txt](maskcam_config.txt) file, which are under the `[property]` section, and make them match your app's configuration parameters (usually under a file `config_infer_primary.txt` in NVIDIA sample apps). 
You should not need to change any of the `[face-processor]`, `[mqtt]` or `[maskcam]` sections of the config file in order to use a new compatible model. Also, note that the `interval` parameter of the `[property]` section will be ignored when `inference-interval-auto` is enabled.\n\nAs an example, you'll find commented code showing how to use a `Detectnet_v2` model like the one trained using the [NVIDIA facemask app](https://github.com/NVIDIA-AI-IOT/face-mask-detection), but after converting the label names as mentioned above.\n\nCheck the [DeepStream docs](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_using_custom_model.html) for more information about how to convert a model in order to use it with DeepStream (in particular, with the `nvinfer` GStreamer plugin).\n\nRemember to include your new model engine file in the [Dockerfile](Dockerfile) before building the container!\n\n### Changing the object labels\nIf your custom model does not have exactly the same label names, you should edit the [maskcam_inference.py](maskcam/maskcam_inference.py) file and change the constants `LABEL_MASK`, `LABEL_NO_MASK`, `LABEL_MISPLACED` and `LABEL_NOT_VISIBLE` to match your needs.\n\nIf your application has nothing to do with detecting face masks, then you'll probably need to change many other parts of the source code for this application, but a good place to start is the `FaceMaskProcessor` class definition in the same inference file, which contains all the code related to the DeepStream pipeline.\n"
  },
  {
    "path": "docs/Manual-Dependencies-Installation.md",
    "content": "## Manual installation and building of dependencies\n\nThese instructions are aimed to manually recreate a native environment similar to the one produced by the [Dockerfile](Dockerfile).\n\nThey are tested on **Ubuntu 18.04 (Bionic Beaver)** with **Jetpack 4.4.1**.\n\n1. Make sure these packages are installed at system level (other required packages are not listed here since they're included with Jetpack, check the [Dockerfile](Dockerfile) for a complete list):\n```\nsudo apt install git, python3-pip, python3-opencv python3-libnvinfer python-gi-dev cuda-toolkit-10-2\n```\n\n2. Clone this repo:\n```\ngit clone <copy https or ssh url>.git\n```\n\n3. Copy any `.egg-info` file under `docker/` to the python's `dist-packages` dir, so that system-level installed packages are visible by Pypi:\n```\nsudo cp docker/*.egg-info /usr/lib/python3/dist-packages/\n```\n\n4. Install the requirements listed on `requirements.txt`:\n```\npip3 install -r requirements.txt\n```\n\nIf any version above fails or you want to ignore the pinned versions for some reason, try:\n```\n# Only run this if you don't want to use the pinned versions\npip3 install -r requirements.in -c docker/constraints.docker\n```\n\n5. Install Nvidia DeepStream:\nAside from the system requirements of th previous step, you also need to install\n[DeepStream 5.0](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Quickstart.html#jetson-setup) \n(no need to install Kafka protocol adaptor)\nand also make sure to install the corresponding **python bindings** for GStreamer\n[gst-python](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Python_Sample_Apps.html#python-bindings),\nand for DeepStream [pyds](https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_Python_Sample_Apps.html#metadata-access).\n\n6. Compile YOLOv4 plugin for DeepStream:\nAfter installing DeepStream, compile the YOLOv4 plugin for DeepStream:\n```\ncd <this repo path>/deepstream_plugin_yolov4\nexport CUDA_VER=10.2\nmake\n```\nIf all went well, you should see a library `libnvdsinfer_custom_impl_Yolo.so` in that directory.\n\n7. Download TensorRT engine file from [here](https://maskcam.s3.us-east-2.amazonaws.com/facemask_y4tiny_1024_608_fp16.trt) and save it as `yolo/facemask_y4tiny_1024_608_fp16.trt`.\n\n8. Now you should be ready to run. By default, the device `/dev/video0` will be used, but other devices can be set as first argument:\n```bash\n# Use default input camera /dev/video0\npython3 maskcam_run.py\n\n# Equivalent as above:\npython3 maskcam_run.py v4l2:///dev/video0\n\n# Process an mp4 file instead (no network functions, MQTT and static file server disabled)\npython3 maskcam_run.py file:///path/to/video.mp4\n\n# Read from Raspi2 camera using device-id\npython3 maskcam_run.py argus:///0\n```\n\nCheck the main [README.md](README) for more parameters that can be configured before running, using environment variables.\n"
  },
  {
    "path": "docs/Useful-Development-Scripts.md",
    "content": "# Useful development scripts\nThese scripts are intended to be used by developers. They require some knowledge on the subject they're used for.\n\n## Running TensorRT engine on images\nThis script will run the engine on a folder of images, and generate another folder\nfor the images with the bounding boxes drawn, and the detection score.\n\nTo run this script, you need basically the same general instructions that the regular installation, except that you don't need DeepStream and you do need OpenCV instead.\n\nUsage:\n```\ncd yolo/\npython3 run_yolo_images.py path/to/input/folder path/to/output/folder\n```\n\n## Debugging MQTT communication\nIf you want to see the raw messages that the MQTT broker receives,\nand be able to send custom messages to the device (at your own risk),\nthere's a script `maskcam/mqtt_commander.py`, which may be useful for debugging\non your local computer or from the Jetson device itself.\n\nThe script connects to the MQTT broker and sniffs all the communication to/from any device to the broker.\n```\nexport MQTT_BROKER_IP=<server ip (local or remote)>\nexport MQTT_DEVICE_NAME=<device to command>\npython3 -m maskcam.mqtt_commander\n```\n\n## Convert weights generated using the original darknet implementation to TRT\n 1. Clone the pytorch implementation of YOLOv4:\n```\ngit clone git@github.com:Tianxiaomo/pytorch-YOLOv4.git\n```\n 2. Convert the Darknet model to ONNX using the script in `tool/darknet2onnx.py`, e.g:\n```\nPYTHONPATH='pytorch-YOLOv4:$PYTHONPATH' python3 pytorch-YOLOv4/tool/darknet2onnx.py yolo/facemask-yolov4-tiny.cfg yolo/facemask-yolov4-tiny_best.weights <optional batch size>\n```\n 3. Convert the ONNX model to TRT (on the Jetson Nano, `trtexec` can be found under `/usr/src/tensorrt/bin/trtexec`):\n```\n/usr/src/tensorrt/bin/trtexec --fp16 --onnx=../yolo/yolov4_1_3_608_608_static.onnx --explicitBatch --saveEngine=tensorrt_fp16.trt\n```\n"
  },
  {
    "path": "maskcam/common.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nCODEC_MP4 = \"MP4\"\nCODEC_H265 = \"H265\"\nCODEC_H264 = \"H264\"\nUSBCAM_PROTOCOL = \"v4l2://\"  # Invented by us since there's no URI for this\nRASPICAM_PROTOCOL = \"argus://\"  # Invented by us since there's no URI for this\nCONFIG_FILE = \"maskcam_config.txt\"  # Also used in nvinfer element\n\n# Available commands (to send internally, between processes or via MQTT)\nCMD_FILE_SAVE = \"save_file\"\nCMD_STREAMING_START = \"streaming_start\"\nCMD_STREAMING_STOP = \"streaming_stop\"\nCMD_INFERENCE_RESTART = \"inference_restart\"\nCMD_FILESERVER_RESTART = \"fileserver_restart\"\nCMD_STATUS_REQUEST = \"status_request\"\n"
  },
  {
    "path": "maskcam/config.py",
    "content": "import os\nimport configparser\nfrom maskcam.common import CONFIG_FILE\nfrom maskcam.prints import print_common as print\n\nconfig = configparser.ConfigParser()\nconfig.read(CONFIG_FILE)\nconfig.sections()\n\n# Environment variables overriding config file values\n# Each row is: (ENV_VAR_NAME, (config-section, config-param))\nENV_CONFIG_OVERRIDES = (\n    (\"MASKCAM_INPUT\", (\"maskcam\", \"default-input\")),  # Redundant with start.sh script\n    (\"MASKCAM_DEVICE_ADDRESS\", (\"maskcam\", \"device-address\")),\n    (\"MASKCAM_DETECTION_THRESHOLD\", (\"face-processor\", \"detection-threshold\")),\n    (\"MASKCAM_VOTING_THRESHOLD\", (\"face-processor\", \"voting-threshold\")),\n    (\"MASKCAM_MIN_FACE_SIZE\", (\"face-processor\", \"min-face-size\")),\n    (\"MASKCAM_DISABLE_TRACKER\", (\"face-processor\", \"disable-tracker\")),\n    (\"MASKCAM_ALERT_MIN_VISIBLE_PEOPLE\", (\"maskcam\", \"alert-min-visible-people\")),\n    (\"MASKCAM_ALERT_MAX_TOTAL_PEOPLE\", (\"maskcam\", \"alert-max-total-people\")),\n    (\"MASKCAM_ALERT_NO_MASK_FRACTION\", (\"maskcam\", \"alert-no-mask-fraction\")),\n    (\"MASKCAM_STATISTICS_PERIOD\", (\"maskcam\", \"statistics-period\")),\n    (\"MASKCAM_TIMEOUT_INFERENCE_RESTART\", (\"maskcam\", \"timeout-inference-restart\")),\n    (\"MASKCAM_CAMERA_FRAMERATE\", (\"maskcam\", \"camera-framerate\")),\n    (\"MASKCAM_CAMERA_FLIP_METHOD\", (\"maskcam\", \"camera-flip-method\")),\n    (\"MASKCAM_OUTPUT_VIDEO_WIDTH\", (\"maskcam\", \"output-video-width\")),\n    (\"MASKCAM_OUTPUT_VIDEO_HEIGHT\", (\"maskcam\", \"output-video-height\")),\n    (\"MASKCAM_INFERENCE_INTERVAL_AUTO\", (\"maskcam\", \"inference-interval-auto\")),\n    (\"MASKCAM_INFERENCE_MAX_FPS\", (\"maskcam\", \"inference-max-fps\")),\n    (\"MASKCAM_INFERENCE_LOG_INTERVAL\", (\"maskcam\", \"inference-log-interval\")),\n    (\"MASKCAM_STREAMING_START_DEFAULT\", (\"maskcam\", \"streaming-start-default\")),\n    (\"MASKCAM_STREAMING_PORT\", (\"maskcam\", \"streaming-port\")),\n    (\"MASKCAM_FILESERVER_ENABLED\", (\"maskcam\", \"fileserver-enabled\")),\n    (\"MASKCAM_FILESERVER_FORCE_SAVE\", (\"maskcam\", \"fileserver-force-save\")),\n    (\"MASKCAM_FILESERVER_VIDEO_PERIOD\", (\"maskcam\", \"fileserver-video-period\")),\n    (\"MASKCAM_FILESERVER_VIDEO_DURATION\", (\"maskcam\", \"fileserver-video-duration\")),\n    (\"MASKCAM_FILESERVER_HDD_DIR\", (\"maskcam\", \"fileserver-hdd-dir\")),\n    (\"MQTT_BROKER_IP\", (\"mqtt\", \"mqtt-broker-ip\")),\n    (\"MQTT_BROKER_PORT\", (\"mqtt\", \"mqtt-broker-port\")),\n    (\"MQTT_DEVICE_NAME\", (\"mqtt\", \"mqtt-device-name\")),\n    (\"MQTT_DEVICE_DESCRIPTION\", (\"mqtt\", \"mqtt-device-description\")),\n)\n\n# Apply overrides\nfor env_var, config_param in ENV_CONFIG_OVERRIDES:\n    override_value = os.environ.get(env_var, None)\n    if override_value is not None:\n        config[config_param[0]][config_param[1]] = override_value\n\n\ndef print_config_overrides():\n    # Leave prints separated so that it can be executed on demand\n    # by one single process instead of each import\n    for env_var, config_param in ENV_CONFIG_OVERRIDES:\n        override_value = os.environ.get(env_var, None)\n        if override_value is not None:\n            print(f\"\\nConfig override {env_var}={override_value}\")\n"
  },
  {
    "path": "maskcam/maskcam_filesave.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport gi\nimport pyds\nimport sys\nimport time\nimport signal\nimport platform\nimport threading\nimport multiprocessing as mp\nfrom datetime import datetime\n\ngi.require_version(\"Gst\", \"1.0\")\ngi.require_version(\"GstBase\", \"1.0\")\ngi.require_version(\"GstRtspServer\", \"1.0\")\nfrom gi.repository import GLib, Gst, GstRtspServer, GstBase\n\nfrom .prints import print_filesave as print\nfrom .common import CODEC_MP4, CODEC_H264, CODEC_H265, CONFIG_FILE\nfrom .utils import glib_cb_restart\nfrom .config import config, print_config_overrides\n\ne_interrupt = None\n\n\ndef make_elm_or_print_err(factoryname, name, printedname, detail=\"\"):\n    \"\"\"Creates an element with Gst Element Factory make.\n    Return the element  if successfully created, otherwise print\n    to stderr and return None.\n    \"\"\"\n    print(\"Creating\", printedname)\n    elm = Gst.ElementFactory.make(factoryname, name)\n    if not elm:\n        print(\"Unable to create \" + printedname, error=True)\n        if detail:\n            print(detail)\n    return elm\n\n\ndef sigint_handler(sig, frame):\n    # This function is not used if e_external_interrupt is provided\n    print(\"[red]Ctrl+C pressed. 
Interrupting file-save...[/red]\")\n    e_interrupt.set()\n\n\ndef main(\n    config: dict,\n    output_filename: str,\n    udp_port: int,\n    e_external_interrupt: mp.Event = None,\n):\n    global e_interrupt\n\n    codec = config[\"maskcam\"][\"codec\"]\n    streaming_clock_rate = int(config[\"maskcam\"][\"streaming-clock-rate\"])\n\n    udp_capabilities = f\"application/x-rtp,media=video,encoding-name=(string){codec},clock-rate={streaming_clock_rate}\"\n\n    # Standard GStreamer initialization\n    # GObject.threads_init()  # Doesn't seem necessary (see https://pygobject.readthedocs.io/en/latest/guide/threading.html)\n    Gst.init(None)\n\n    # Create gstreamer elements\n    # Create Pipeline element that will form a connection of other elements\n    print(\n        \"[green]Creating:[/green] file-saving pipeline \"\n        f\"UDP(port:{udp_port})->File({output_filename})\"\n    )\n    pipeline = Gst.Pipeline()\n\n    if not pipeline:\n        print(\"Unable to create Pipeline\", error=True)\n\n    udpsrc = make_elm_or_print_err(\"udpsrc\", \"udpsrc\", \"UDP Source\")\n    udpsrc.set_property(\"port\", udp_port)\n    udpsrc.set_property(\"buffer-size\", 524288)\n    udpsrc.set_property(\"caps\", Gst.Caps.from_string(udp_capabilities))\n    rtpjitterbuffer = make_elm_or_print_err(\n        \"rtpjitterbuffer\", \"rtpjitterbuffer\", \"RTP Jitter Buffer\"\n    )\n    # Default mode is 1 (slave), acts as a live source and gets laggy\n    rtpjitterbuffer.set_property(\"mode\", 4)\n\n    # caps_udp = make_elm_or_print_err(\"capsfilter\", \"caps_udp\", \"UDP RTP capabilities\")\n    # caps_udp.set_property(\"caps\", Gst.Caps.from_string(udp_capabilities))\n\n    if codec == CODEC_MP4:\n        print(\"Creating MPEG-4 payload decoder\")\n        rtpdepay = make_elm_or_print_err(\"rtpmp4vpay\", \"rtpdepay\", \"RTP MPEG-4 Payload Decoder\")\n        codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", \"Code Parser\")\n    elif codec == CODEC_H264:\n        print(\"Creating H264 payload decoder\")\n        rtpdepay = make_elm_or_print_err(\"rtph264depay\", \"rtpdepay\", \"RTP H264 Payload Decoder\")\n        codeparser = make_elm_or_print_err(\"h264parse\", \"h264-parser\", \"Code Parser\")\n    else:  # Default: H265 (recommended)\n        print(\"Creating H265 payload decoder\")\n        rtpdepay = make_elm_or_print_err(\"rtph265depay\", \"rtpdepay\", \"RTP H265 Payload Decoder\")\n        codeparser = make_elm_or_print_err(\"h265parse\", \"h265-parser\", \"Code Parser\")\n\n    # Workaround for this issue: https://gitlab.freedesktop.org/gstreamer/gst-plugins-good/-/issues/410\n    GstBase.BaseParse.set_pts_interpolation(codeparser, True)\n\n    container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n    filesink = make_elm_or_print_err(\"filesink\", \"filesink\", \"File Sink\")\n    filesink.set_property(\"location\", output_filename)\n    # filesink.set_property(\"sync\", False)\n    # filesink.set_property(\"async\", False)\n\n    pipeline.add(udpsrc)\n    pipeline.add(rtpjitterbuffer)\n    # pipeline.add(caps_udp)\n    pipeline.add(rtpdepay)\n    pipeline.add(codeparser)\n    pipeline.add(container)\n    pipeline.add(filesink)\n\n    # Pipeline Links\n    udpsrc.link(rtpjitterbuffer)\n    rtpjitterbuffer.link(rtpdepay)\n    # caps_udp.link(rtpdepay)\n    rtpdepay.link(codeparser)\n    codeparser.link(container)\n    container.link(filesink)\n\n    # GLib loop required for RTSP server\n    g_loop = GLib.MainLoop()\n    g_context = 
g_loop.get_context()\n\n    # GStreamer message bus\n    bus = pipeline.get_bus()\n\n    if e_external_interrupt is None:\n        # Use threading instead of mp.Event() for sigint_handler, see:\n        # https://bugs.python.org/issue41606\n        e_interrupt = threading.Event()\n        signal.signal(signal.SIGINT, sigint_handler)\n        print(\"[green bold]Press Ctrl+C to save video and exit[/green bold]\")\n    else:\n        # If there's an external interrupt, don't capture SIGINT\n        e_interrupt = e_external_interrupt\n\n    # Periodic gloop interrupt (see utils.glib_cb_restart)\n    t_check = 50\n    GLib.timeout_add(t_check, glib_cb_restart, t_check)\n\n    # Custom event loop, allows saving file on Ctrl+C press\n    running = True\n\n    # start play back and listen to events\n    pipeline.set_state(Gst.State.PLAYING)\n    print(\"[green]Playing:[/green] file-saving pipeline UDP->File\\n\")\n\n    while running:\n        g_context.iteration(may_block=True)\n        message = bus.pop()\n        if message is not None:\n            t = message.type\n\n            if t == Gst.MessageType.EOS:\n                print(f\"File saved: [yellow]{output_filename}[/yellow]\")\n                running = False\n            elif t == Gst.MessageType.WARNING:\n                err, debug = message.parse_warning()\n                print(\"%s: %s\" % (err, debug), warning=True)\n            elif t == Gst.MessageType.ERROR:\n                err, debug = message.parse_error()\n                print(\"%s: %s\" % (err, debug), error=True)\n                running = False\n        if e_interrupt.is_set():\n            print(\"Interruption received. Sending EOS to generate video file.\")\n            # This will allow the filesink to create a readable mp4 file\n            container.send_event(Gst.Event.new_eos())\n            e_interrupt.clear()\n\n    print(\"File-saver main loop ending.\")\n    # cleanup\n    pipeline.set_state(Gst.State.NULL)\n\n\nif __name__ == \"__main__\":\n    # Print any ENV var config override to avoid confusions\n    print_config_overrides()\n\n    # Check arguments\n    output_filename = None\n    udp_port = None\n    if len(sys.argv) > 1:\n        output_filename = sys.argv[1]\n    if len(sys.argv) > 2:\n        udp_port = int(sys.argv[2])\n\n    if not output_filename:\n        output_dir = config[\"maskcam\"][\"fileserver-hdd-dir\"]\n        output_filename = f\"{output_dir}/{datetime.today().strftime('%Y%m%d_%H%M%S')}.mp4\"\n    if not udp_port:  # Use first listed in config\n        udp_port = int(config[\"maskcam\"][\"udp-ports-filesave\"].split(\",\")[0])\n    print(f\"Output file: {output_filename}\")\n\n    sys.exit(main(config=config, output_filename=output_filename, udp_port=udp_port))\n"
  },
  {
    "path": "maskcam/maskcam_fileserver.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport sys\nimport time\nimport socket\nimport threading\nimport multiprocessing as mp\nfrom datetime import datetime\nfrom http.server import SimpleHTTPRequestHandler\nfrom socketserver import TCPServer, ThreadingTCPServer\n\nfrom .config import config, print_config_overrides\nfrom .utils import get_ip_address\nfrom .prints import print_fileserver as print\n\n\nclass Handler(SimpleHTTPRequestHandler):\n    # Needed to set extensions_map\n    pass\n\n\ndef start_server(httpd_server):\n    httpd_server.serve_forever(poll_interval=0.5)\n\n\ndef cb_handle_error(request, client_address):\n    # Not important, happens very often but nothing actually fails\n    print(f\"Static file server: File request interrupted [client: {client_address}]\")\n\n\ndef main(config, directory=None, e_external_interrupt: mp.Event = None):\n    if directory is None:\n        directory = config[\"maskcam\"][\"fileserver-hdd-dir\"]\n    directory = os.fspath(directory)\n    print(f\"Serving static files from directory: [yellow]{directory}[/yellow]\")\n\n    port = int(config[\"maskcam\"][\"fileserver-port\"])\n\n    # Create dir if doesn't exist\n    os.system(f\"mkdir -p {directory}\")\n    os.chdir(directory)  # easiest way\n\n    # Force download mp4 files\n    Handler.extensions_map[\".mp4\"] = \"application/octet-stream\"\n\n    print(f\"[green]Static server STARTED[/green] at http://{get_ip_address()}:{port}\")\n    with ThreadingTCPServer((\"\", port), Handler) as httpd:\n        httpd.handle_error = cb_handle_error\n        s = threading.Thread(target=start_server, args=(httpd,))\n        s.start()\n        try:\n            if e_external_interrupt is not None:\n                e_external_interrupt.wait()  # blocking\n            else:\n                s.join()  # blocking\n        except KeyboardInterrupt:\n            print(\"Ctrl+C pressed\")\n        print(\"Shutting down static file server\")\n        httpd.shutdown()\n        httpd.server_close()\n        s.join(timeout=1)\n        if s.is_alive():\n            print(\"Server thread did not stop\", warning=True)\n        else:\n            print(\"Server shut down correctly\")\n    print(f\"Server 
alive threads: {threading.enumerate()}\")\n\n\nif __name__ == \"__main__\":\n\n    # Print any ENV var config override to avoid confusions\n    print_config_overrides()\n\n    # Input source\n    directory = sys.argv[1] if len(sys.argv) > 1 else None\n    main(config, directory=directory)\n"
  },
  {
    "path": "maskcam/maskcam_inference.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport gi\nimport pyds\nimport sys\nimport ipdb\nimport time\nimport signal\nimport platform\nimport threading\nimport numpy as np\nimport multiprocessing as mp\nfrom rich.console import Console\nfrom datetime import datetime, timezone\n\n\ngi.require_version(\"Gst\", \"1.0\")\ngi.require_version(\"GstRtspServer\", \"1.0\")\nfrom gi.repository import GLib, Gst, GstRtspServer\n\nfrom norfair.tracker import Tracker, Detection\n\nfrom .config import config, print_config_overrides\nfrom .prints import print_inference as print\nfrom .common import (\n    CODEC_MP4,\n    CODEC_H264,\n    CODEC_H265,\n    USBCAM_PROTOCOL,\n    RASPICAM_PROTOCOL,\n    CONFIG_FILE,\n)\nfrom .utils import glib_cb_restart, load_udp_ports_filesaving\n\n\n# YOLO labels. 
See obj.names file\nLABEL_MASK = \"mask\"\nLABEL_NO_MASK = \"no_mask\"  # YOLOv4: no_mask\nLABEL_MISPLACED = \"misplaced\"\nLABEL_NOT_VISIBLE = \"not_visible\"\nFRAMES_LOG_INTERVAL = int(config[\"maskcam\"][\"inference-log-interval\"])\n\n# Global vars\nframe_number = 0\nstart_time = None\nend_time = None\nconsole = Console()\ne_interrupt = None\n\n\nclass FaceMaskProcessor:\n    def __init__(\n        self, th_detection=0, th_vote=0, min_face_size=0, tracker_period=1, disable_tracker=False\n    ):\n        self.people_votes = {}\n        self.current_people = set()\n        self.th_detection = th_detection\n        self.th_vote = th_vote\n        self.tracker_period = tracker_period\n        self.min_face_size = min_face_size\n        self.disable_detection_validation = False\n        self.min_votes = 5\n        self.max_votes = 50\n        self.color_mask = (0.0, 1.0, 0.0)  # green\n        self.color_no_mask = (1.0, 0.0, 0.0)  # red\n        self.color_unknown = (1.0, 1.0, 0.0)  # yellow\n        self.draw_raw_detections = disable_tracker\n        self.draw_tracked_people = not disable_tracker\n        self.stats_lock = threading.Lock()\n\n        # Norfair Tracker\n        if disable_tracker:\n            self.tracker = None\n        else:\n            self.tracker = Tracker(\n                distance_function=self.keypoints_distance,\n                detection_threshold=self.th_detection,\n                distance_threshold=1,\n                point_transience=8,\n                hit_inertia_min=15,\n                hit_inertia_max=45,\n            )\n\n    def keypoints_distance(self, detected_pose, tracked_pose):\n        detected_points = detected_pose.points\n        estimated_pose = tracked_pose.estimate\n        min_box_size = min(\n            max(\n                detected_points[1][0] - detected_points[0][0],  # x2 - x1\n                detected_points[1][1] - detected_points[0][1],  # y2 - y1\n                1,\n            ),\n            max(\n                estimated_pose[1][0] - estimated_pose[0][0],  # x2 - x1\n                estimated_pose[1][1] - estimated_pose[0][1],  # y2 - y1\n                1,\n            ),\n        )\n        mean_distance_normalized = (\n            np.mean(np.linalg.norm(detected_points - estimated_pose, axis=1)) / min_box_size\n        )\n        return mean_distance_normalized\n\n    def validate_detection(self, box_points, score, label):\n        if self.disable_detection_validation:\n            return True\n        box_width = box_points[1][0] - box_points[0][0]\n        box_height = box_points[1][1] - box_points[0][1]\n        return min(box_width, box_height) >= self.min_face_size and score >= self.th_detection\n\n    def add_detection(self, person_id, label, score):\n        # This function is called from streaming thread\n        with self.stats_lock:\n            self.current_people.add(person_id)\n            if person_id not in self.people_votes:\n                self.people_votes[person_id] = 0\n            if score > self.th_vote:\n                if label == LABEL_MASK:\n                    self.people_votes[person_id] += 1\n                elif label in (LABEL_NO_MASK, LABEL_MISPLACED):  # misplaced masks count as no-mask votes\n                    self.people_votes[person_id] -= 1\n                # max_votes limit\n                self.people_votes[person_id] = np.clip(\n                    self.people_votes[person_id], -self.max_votes, self.max_votes\n                )\n\n    def get_person_label(self, person_id):\n        person_votes = 
self.people_votes[person_id]\n        if abs(person_votes) >= self.min_votes:\n            color = self.color_mask if person_votes > 0 else self.color_no_mask\n            label = \"mask\" if person_votes > 0 else \"no mask\"\n        else:\n            color = self.color_unknown\n            label = \"not visible\"\n        return f\"{person_id}|{label}({abs(person_votes)})\", color\n\n    def get_instant_statistics(self, refresh=True):\n        \"\"\"\n        Get statistics only including people that appeared on camera since last refresh\n        \"\"\"\n        instant_stats = self.get_statistics(filter_ids=self.current_people)\n        if refresh:\n            with self.stats_lock:\n                self.current_people = set()\n        return instant_stats\n\n    def get_statistics(self, filter_ids=None):\n        with self.stats_lock:\n            if filter_ids is not None:\n                filtered_people = {\n                    id: votes for id, votes in self.people_votes.items() if id in filter_ids\n                }\n            else:\n                filtered_people = self.people_votes\n            total_people = len(filtered_people)\n            total_classified = 0\n            total_mask = 0\n            for person_id in filtered_people:\n                person_votes = filtered_people[person_id]\n                if abs(person_votes) >= self.min_votes:\n                    total_classified += 1\n                    if person_votes > 0:\n                        total_mask += 1\n        return total_people, total_classified, total_mask\n\n\ndef cb_add_statistics(cb_args):\n    stats_period, stats_queue, face_processor = cb_args\n\n    people_total, people_classified, people_mask = face_processor.get_instant_statistics(\n        refresh=True\n    )\n    people_no_mask = people_classified - people_mask\n\n    # stats_queue is an mp.Queue optionally provided externally (in main())\n    stats_queue.put_nowait(\n        {\n            \"people_total\": people_total,\n            \"people_with_mask\": people_mask,\n            \"people_without_mask\": people_no_mask,\n            \"timestamp\": datetime.timestamp(datetime.now(timezone.utc)),\n        }\n    )\n\n    # Next report timeout\n    GLib.timeout_add_seconds(stats_period, cb_add_statistics, cb_args)\n\n\ndef sigint_handler(sig, frame):\n    # This function is not used if e_external_interrupt is provided\n    print(\"[red]Ctrl+C pressed. 
Interrupting inference...[/red]\")\n    e_interrupt.set()\n\n\ndef is_aarch64():\n    return platform.uname()[4] == \"aarch64\"\n\n\ndef draw_detection(display_meta, n_draw, box_points, detection_label, color):\n    # print(f\"Drawing {n_draw} | {detection_label}\")\n    # print(box_points)\n    rect = display_meta.rect_params[n_draw]\n\n    ((x1, y1), (x2, y2)) = box_points\n    rect.left = x1\n    rect.top = y1\n    rect.width = x2 - x1\n    rect.height = y2 - y1\n    # print(f\"{x1} {y1}, {x2} {y2}\")\n    # Bug: bg color is always green\n    # rect.has_bg_color = True\n    # rect.bg_color.set(0.5, 0.5, 0.5, 0.6)  # RGBA\n    rect.border_color.set(*color, 1.0)\n    rect.border_width = 2\n    label = display_meta.text_params[n_draw]\n    label.x_offset = x1\n    label.y_offset = y2\n    label.font_params.font_name = \"Verdana\"\n    label.font_params.font_size = 9\n    label.font_params.font_color.set(0, 0, 0, 1.0)  # Black\n    # label.display_text = f\"{person.id} | {detection_p:.2f}\"\n    label.display_text = detection_label\n    label.set_bg_clr = True\n    label.text_bg_clr.set(*color, 0.5)\n\n    display_meta.num_rects = n_draw + 1\n    display_meta.num_labels = n_draw + 1\n\n\ndef cb_buffer_probe(pad, info, cb_args):\n    global frame_number\n    global start_time\n\n    face_processor, e_ready = cb_args\n    gst_buffer = info.get_buffer()\n    if not gst_buffer:\n        print(\"Unable to get GstBuffer\", error=True)\n        return\n\n    # Set e_ready event to notify the pipeline is working (e.g: for orchestrator)\n    if e_ready is not None and not e_ready.is_set():\n        print(\"Inference pipeline setting [green]e_ready[/green]\")\n        e_ready.set()\n\n    # Retrieve batch metadata from the gst_buffer\n    # Note that pyds.gst_buffer_get_nvds_batch_meta() expects the\n    # C address of gst_buffer as input, which is obtained with hash(gst_buffer)\n    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))\n\n    l_frame = batch_meta.frame_meta_list\n    while l_frame is not None:\n        try:\n            # Note that l_frame.data needs a cast to pyds.NvDsFrameMeta\n            # The casting is done by pyds.glist_get_nvds_frame_meta()\n            # The casting also keeps ownership of the underlying memory\n            # in the C code, so the Python garbage collector will leave\n            # it alone.\n            # frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)\n            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)\n        except StopIteration:\n            break\n\n        frame_number = frame_meta.frame_num\n        # num_detections = frame_meta.num_obj_meta\n        l_obj = frame_meta.obj_meta_list\n        detections = []\n        obj_meta_list = []\n        while l_obj is not None:\n            try:\n                # Casting l_obj.data to pyds.NvDsObjectMeta\n                # obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)\n                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)\n            except StopIteration:\n                break\n            obj_meta_list.append(obj_meta)\n            obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)\n            box = obj_meta.rect_params\n            # print(f\"{obj_meta.obj_label} | {obj_meta.confidence}\")\n\n            box_points = (\n                (box.left, box.top),\n                (box.left + box.width, box.top + box.height),\n            )\n            box_p = obj_meta.confidence\n            box_label = obj_meta.obj_label\n            if 
face_processor.validate_detection(box_points, box_p, box_label):\n                det_data = {\"label\": box_label, \"p\": box_p}\n                detections.append(\n                    Detection(\n                        np.array(box_points),\n                        data=det_data,\n                    )\n                )\n                # print(f\"Added detection: {det_data}\")\n            try:\n                l_obj = l_obj.next\n            except StopIteration:\n                break\n\n        # Remove all object meta to avoid drawing. Do this outside while since we're modifying list\n        for obj_meta in obj_meta_list:\n            # Remove this to avoid drawing label texts\n            pyds.nvds_remove_obj_meta_from_frame(frame_meta, obj_meta)\n        obj_meta_list = None\n\n        # Each meta object carries max 16 rects/labels/etc.\n        max_drawings_per_meta = 16  # This is hardcoded, not documented\n\n        if face_processor.tracker is not None:\n            # Track, count and draw tracked people\n            tracked_people = face_processor.tracker.update(\n                detections, period=face_processor.tracker_period\n            )\n            # Filter out people with no live points (don't draw)\n            drawn_people = [person for person in tracked_people if person.live_points.any()]\n\n            if face_processor.draw_tracked_people:\n                for n_person, person in enumerate(drawn_people):\n                    points = person.estimate\n                    box_points = points.clip(0).astype(int)\n\n                    # Update mask votes\n                    face_processor.add_detection(\n                        person.id,\n                        person.last_detection.data[\"label\"],\n                        person.last_detection.data[\"p\"],\n                    )\n                    label, color = face_processor.get_person_label(person.id)\n\n                    # Index of this person's drawing in the current meta\n                    n_draw = n_person % max_drawings_per_meta\n\n                    if n_draw == 0:  # Initialize meta\n                        # Acquiring a display meta object. The memory ownership remains in\n                        # the C code so downstream plugins can still access it. Otherwise\n                        # the garbage collector will claim it when this probe function exits.\n                        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n                        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n\n                    draw_detection(display_meta, n_draw, box_points, label, color)\n\n        # Raw detections\n        if face_processor.draw_raw_detections:\n            for n_detection, detection in enumerate(detections):\n                points = detection.points\n                box_points = points.clip(0).astype(int)\n                label = detection.data[\"label\"]\n                if label == LABEL_MASK:\n                    color = face_processor.color_mask\n                elif label == LABEL_NO_MASK or label == LABEL_MISPLACED:\n                    color = face_processor.color_no_mask\n                else:\n                    color = face_processor.color_unknown\n                label = f\"{label} | {detection.data['p']:.2f}\"\n                n_draw = n_detection % max_drawings_per_meta\n\n                if n_draw == 0:  # Initialize meta\n                    # Acquiring a display meta object. 
The memory ownership remains in\n                    # the C code so downstream plugins can still access it. Otherwise\n                    # the garbage collector will claim it when this probe function exits.\n                    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)\n                    pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)\n                draw_detection(display_meta, n_draw, box_points, label, color)\n\n            # Using pyds.get_string() to get display_text as string\n            # print(pyds.get_string(py_nvosd_text_params.display_text))\n            # print(\".\", end=\"\", flush=True)\n        # print(\"\")\n        if not frame_number % FRAMES_LOG_INTERVAL:\n            print(f\"Processed {frame_number} frames...\")\n\n        try:\n            l_frame = l_frame.next\n        except StopIteration:\n            break\n    # Start timer at the end of first frame processing\n    if start_time is None:\n        start_time = time.time()\n    return Gst.PadProbeReturn.OK\n\n\ndef cb_newpad(decodebin, decoder_src_pad, data):\n    print(\"In cb_newpad\\n\")\n    caps = decoder_src_pad.get_current_caps()\n    gststruct = caps.get_structure(0)\n    gstname = gststruct.get_name()\n    source_bin = data\n    features = caps.get_features(0)\n\n    # Need to check if the pad created by the decodebin is for video and not\n    # audio.\n    print(\"gstname=\", gstname)\n    if gstname.find(\"video\") != -1:\n        # Link the decodebin pad only if decodebin has picked nvidia\n        # decoder plugin nvdec_*. We do this by checking if the pad caps contain\n        # NVMM memory features.\n        print(\"features=\", features)\n        if features.contains(\"memory:NVMM\"):\n            # Get the source bin ghost pad\n            bin_ghost_pad = source_bin.get_static_pad(\"src\")\n            if not bin_ghost_pad.set_target(decoder_src_pad):\n                print(\"Failed to link decoder src pad to source bin ghost pad\", error=True)\n        else:\n            print(\"Decodebin did not pick nvidia decoder plugin\", error=True)\n\n\ndef decodebin_child_added(child_proxy, Object, name, user_data):\n    print(f\"Decodebin child added: {name}\")\n    if name.find(\"decodebin\") != -1:\n        Object.connect(\"child-added\", decodebin_child_added, user_data)\n    if is_aarch64() and name.find(\"nvv4l2decoder\") != -1:\n        Object.set_property(\"bufapi-version\", True)\n\n\ndef create_source_bin(index, uri):\n    print(\"Creating source bin\")\n\n    # Create a source GstBin to abstract this bin's content from the rest of the\n    # pipeline\n    bin_name = \"source-bin-%02d\" % index\n    print(bin_name)\n    nbin = Gst.Bin.new(bin_name)\n    if not nbin:\n        print(\"Unable to create source bin\", error=True)\n\n    # Source element for reading from the uri.\n    # We will use decodebin and let it figure out the container format of the\n    # stream and the codec and plug the appropriate demux and decode plugins.\n    uri_decode_bin = Gst.ElementFactory.make(\"uridecodebin\", \"uri-decode-bin\")\n    if not uri_decode_bin:\n        print(\"Unable to create uri decode bin\", error=True)\n    # We set the input uri to the source element\n    uri_decode_bin.set_property(\"uri\", uri)\n    # Connect to the \"pad-added\" signal of the decodebin which generates a\n    # callback once a new pad for raw data has been created by the decodebin\n    uri_decode_bin.connect(\"pad-added\", cb_newpad, nbin)\n
    uri_decode_bin.connect(\"child-added\", decodebin_child_added, nbin)\n\n    # We need to create a ghost pad for the source bin which will act as a proxy\n    # for the video decoder src pad. The ghost pad will not have a target right\n    # now. Once the decode bin creates the video decoder and generates the\n    # cb_newpad callback, we will set the ghost pad target to the video decoder\n    # src pad.\n    Gst.Bin.add(nbin, uri_decode_bin)\n    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target(\"src\", Gst.PadDirection.SRC))\n    if not bin_pad:\n        print(\"Failed to add ghost pad in source bin\", error=True)\n        return None\n    return nbin\n\n\ndef make_elm_or_print_err(factoryname, name, printedname):\n    \"\"\"Creates an element with Gst Element Factory make.\n    Return the element if successfully created, otherwise print\n    to stderr and return None.\n    \"\"\"\n    print(\"Creating\", printedname)\n    elm = Gst.ElementFactory.make(factoryname, name)\n    if not elm:\n        print(\"Unable to create \", printedname, error=True)\n        show_troubleshooting()\n    return elm\n\n\ndef show_troubleshooting():\n    # On Jetson, there is a problem with the encoder failing to initialize\n    # due to a limitation on TLS usage. To work around this, preload libgomp.\n    # Add a reminder here in case the user forgets.\n    print(\n        \"\"\"\n    [yellow]TROUBLESHOOTING HELP[/yellow]\n\n    [yellow]If the error is like: v4l-camera-source / reason not-negotiated[/yellow]\n    [green]Solution:[/green] configure camera capabilities\n    Run the script under utils/gst_capabilities.sh and find the lines with type\n    video/x-raw ...\n    Find a suitable framerate=X/1 (with X being an integer like 24, 15, etc.)\n    Then edit maskcam_config.txt and change the line:\n    camera-framerate=X\n    Or configure using --env MASKCAM_CAMERA_FRAMERATE=X (see README)\n\n    [yellow]If the error is like:\n    /usr/lib/aarch64-linux-gnu/libgomp.so.1: cannot allocate memory in static TLS block[/yellow]\n    [green]Solution:[/green] preload the offending library\n    export LD_PRELOAD=/usr/lib/aarch64-linux-gnu/libgomp.so.1\n\n    [yellow]END HELP[/yellow]\n    \"\"\"\n    )\n\n\ndef main(\n    config: dict,\n    input_filename: str,\n    output_filename: str = None,\n    e_external_interrupt: mp.Event = None,\n    stats_queue: mp.Queue = None,\n    e_ready: mp.Event = None,\n):\n    global frame_number\n    global start_time\n    global end_time\n    global e_interrupt\n\n    # Load all udp ports to output video\n    udp_ports = {int(config[\"maskcam\"][\"udp-port-streaming\"])}\n    load_udp_ports_filesaving(config, udp_ports)\n\n    codec = config[\"maskcam\"][\"codec\"]\n    stats_period = int(config[\"maskcam\"][\"statistics-period\"])\n\n    # Original: 1920x1080, bdti_resized: 1024x576, yolo-input: 1024x608\n    output_width = int(config[\"maskcam\"][\"output-video-width\"])\n    output_height = int(config[\"maskcam\"][\"output-video-height\"])\n    output_bitrate = 6000000  # Nice for h264@1024x576: 4000000\n\n    # Two types of camera supported: USB or Raspi\n    usbcam_input = USBCAM_PROTOCOL in input_filename\n    raspicam_input = RASPICAM_PROTOCOL in input_filename\n    camera_input = usbcam_input or raspicam_input\n    if camera_input:\n        camera_framerate = int(config[\"maskcam\"][\"camera-framerate\"])\n        camera_flip_method = int(config[\"maskcam\"][\"camera-flip-method\"])\n\n    # Set nvinfer.interval (number of frames to skip inference and use tracker 
instead)\n    if camera_input and int(config[\"maskcam\"][\"inference-interval-auto\"]):\n        max_fps = int(config[\"maskcam\"][\"inference-max-fps\"])\n        skip_inference = camera_framerate // max_fps\n        print(f\"Auto calculated frames to skip inference: {skip_inference}\")\n    else:\n        skip_inference = int(config[\"property\"][\"interval\"])\n        print(f\"Configured frames to skip inference: {skip_inference}\")\n\n    # FaceMask initialization\n    face_tracker_period = skip_inference + 1  # tracker_period=skipped + inference frame(1)\n    face_detection_threshold = float(config[\"face-processor\"][\"detection-threshold\"])\n    face_voting_threshold = float(config[\"face-processor\"][\"voting-threshold\"])\n    face_min_face_size = int(config[\"face-processor\"][\"min-face-size\"])\n    face_disable_tracker = int(config[\"face-processor\"][\"disable-tracker\"])\n    face_processor = FaceMaskProcessor(\n        th_detection=face_detection_threshold,\n        th_vote=face_voting_threshold,\n        min_face_size=face_min_face_size,\n        tracker_period=face_tracker_period,\n        disable_tracker=face_disable_tracker,\n    )\n\n    # Standard GStreamer initialization\n    Gst.init(None)\n\n    # Create gstreamer elements\n    # Create Pipeline element that will form a connection of other elements\n    print(\"Creating Pipeline \\n \")\n    pipeline = Gst.Pipeline()\n\n    if not pipeline:\n        print(\"Unable to create Pipeline\", error=True)\n\n    if camera_input:\n        if usbcam_input:\n            input_device = input_filename[len(USBCAM_PROTOCOL) :]\n            source = make_elm_or_print_err(\"v4l2src\", \"v4l2-camera-source\", \"Camera input\")\n            source.set_property(\"device\", input_device)\n            nvvidconvsrc = make_elm_or_print_err(\n                \"nvvideoconvert\", \"convertor_src2\", \"Convertor src 2\"\n            )\n\n            # Input camera configuration\n            # Use ./gst_capabilities.sh to get the list of available capabilities from /dev/video0\n            camera_capabilities = f\"video/x-raw, framerate={camera_framerate}/1\"\n        elif raspicam_input:\n            input_device = input_filename[len(RASPICAM_PROTOCOL) :]\n            source = make_elm_or_print_err(\n                \"nvarguscamerasrc\", \"nv-argus-camera-source\", \"RaspiCam input\"\n            )\n            source.set_property(\"sensor-id\", int(input_device))\n            source.set_property(\"bufapi-version\", 1)\n\n            # Special camera_capabilities for raspicam\n            camera_capabilities = f\"video/x-raw(memory:NVMM),framerate={camera_framerate}/1\"\n            nvvidconvsrc = make_elm_or_print_err(\"nvvidconv\", \"convertor_flip\", \"Convertor flip\")\n            nvvidconvsrc.set_property(\"flip-method\", camera_flip_method)\n\n        # Mysterious converting sequence from deepstream_test_1_usb.py\n        caps_camera = make_elm_or_print_err(\"capsfilter\", \"camera_src_caps\", \"Camera caps filter\")\n        caps_camera.set_property(\n            \"caps\",\n            Gst.Caps.from_string(camera_capabilities),\n        )\n        vidconvsrc = make_elm_or_print_err(\"videoconvert\", \"convertor_src1\", \"Convertor src 1\")\n        caps_vidconvsrc = make_elm_or_print_err(\n            \"capsfilter\", \"nvmm_caps\", \"NVMM caps for input stream\"\n        )\n        caps_vidconvsrc.set_property(\"caps\", Gst.Caps.from_string(\"video/x-raw(memory:NVMM)\"))\n    else:\n        source_bin = create_source_bin(0,
 input_filename)\n\n    # Create nvstreammux instance to form batches from one or more sources.\n    streammux = make_elm_or_print_err(\"nvstreammux\", \"Stream-muxer\", \"NvStreamMux\")\n    streammux.set_property(\"width\", output_width)\n    streammux.set_property(\"height\", output_height)\n    streammux.set_property(\"enable-padding\", True)  # Keeps aspect ratio, but adds black margin\n    streammux.set_property(\"batch-size\", 1)\n    streammux.set_property(\"batched-push-timeout\", 4000000)\n\n    # Adding this element after muxer will cause detections to get delayed\n    # videorate = make_elm_or_print_err(\"videorate\", \"Video-rate\", \"Video Rate\")\n\n    # Inference element: object detection using TRT engine\n    pgie = make_elm_or_print_err(\"nvinfer\", \"primary-inference\", \"pgie\")\n    pgie.set_property(\"config-file-path\", CONFIG_FILE)\n    pgie.set_property(\"interval\", skip_inference)\n\n    # Use convertor to convert from NV12 to RGBA as required by nvosd\n    convert_pre_osd = make_elm_or_print_err(\n        \"nvvideoconvert\", \"convert_pre_osd\", \"Converter NV12->RGBA\"\n    )\n\n    # OSD: to draw on the RGBA buffer\n    nvosd = make_elm_or_print_err(\"nvdsosd\", \"onscreendisplay\", \"OSD (nvosd)\")\n    nvosd.set_property(\"process-mode\", 2)  # 0: CPU Mode, 1: GPU (only dGPU), 2: VIC (Jetson only)\n    # nvosd.set_property(\"display-bbox\", False)  # Bug: Removes all squares\n    nvosd.set_property(\"display-clock\", False)\n    nvosd.set_property(\"display-text\", True)  # Needed for any text\n\n    # Finally encode and save the osd output\n    queue = make_elm_or_print_err(\"queue\", \"queue\", \"Queue\")\n    convert_post_osd = make_elm_or_print_err(\n        \"nvvideoconvert\", \"convert_post_osd\", \"Converter RGBA->NV12\"\n    )\n\n    # Video capabilities: check format and GPU/CPU location\n    capsfilter = make_elm_or_print_err(\"capsfilter\", \"capsfilter\", \"capsfilter\")\n    if codec == CODEC_MP4:  # Not hw accelerated\n        caps = Gst.Caps.from_string(\"video/x-raw, format=I420\")\n    else:  # hw accelerated\n        caps = Gst.Caps.from_string(\"video/x-raw(memory:NVMM), format=I420\")\n    capsfilter.set_property(\"caps\", caps)\n\n    # Encoder: H265 has more efficient compression\n    if codec == CODEC_MP4:\n        print(\"Creating MPEG-4 stream\")\n        encoder = make_elm_or_print_err(\"avenc_mpeg4\", \"encoder\", \"Encoder\")\n        codeparser = make_elm_or_print_err(\"mpeg4videoparse\", \"mpeg4-parser\", \"Code Parser\")\n        rtppay = make_elm_or_print_err(\"rtpmp4vpay\", \"rtppay\", \"RTP MPEG-4 Payload\")\n    elif codec == CODEC_H264:\n        print(\"Creating H264 stream\")\n        encoder = make_elm_or_print_err(\"nvv4l2h264enc\", \"encoder\", \"Encoder\")\n        encoder.set_property(\"preset-level\", 1)\n        encoder.set_property(\"bufapi-version\", 1)\n        codeparser = make_elm_or_print_err(\"h264parse\", \"h264-parser\", \"Code Parser\")\n        rtppay = make_elm_or_print_err(\"rtph264pay\", \"rtppay\", \"RTP H264 Payload\")\n    else:  # Default: H265 (recommended)\n        print(\"Creating H265 stream\")\n        encoder = make_elm_or_print_err(\"nvv4l2h265enc\", \"encoder\", \"Encoder\")\n        encoder.set_property(\"preset-level\", 1)\n        encoder.set_property(\"bufapi-version\", 1)\n        codeparser = make_elm_or_print_err(\"h265parse\", \"h265-parser\", \"Code Parser\")\n        rtppay = make_elm_or_print_err(\"rtph265pay\", \"rtppay\", \"RTP H265 Payload\")\n\n    
encoder.set_property(\"insert-sps-pps\", 1)\n    encoder.set_property(\"bitrate\", output_bitrate)\n\n    splitter_file_udp = make_elm_or_print_err(\"tee\", \"tee_file_udp\", \"Splitter file/UDP\")\n\n    # UDP streaming\n    queue_udp = make_elm_or_print_err(\"queue\", \"queue_udp\", \"UDP queue\")\n    multiudpsink = make_elm_or_print_err(\"multiudpsink\", \"multi udpsink\", \"Multi UDP Sink\")\n    # udpsink.set_property(\"host\", \"127.0.0.1\")\n    # udpsink.set_property(\"port\", udp_port)\n\n    # Comma separated list of clients, don't add spaces :S\n    client_list = [f\"127.0.0.1:{udp_port}\" for udp_port in udp_ports]\n    multiudpsink.set_property(\"clients\", \",\".join(client_list))\n\n    multiudpsink.set_property(\"async\", False)\n    multiudpsink.set_property(\"sync\", True)\n\n    if output_filename is not None:\n        queue_file = make_elm_or_print_err(\"queue\", \"queue_file\", \"File save queue\")\n        # codeparser already created above depending on codec\n        container = make_elm_or_print_err(\"qtmux\", \"qtmux\", \"Container\")\n        filesink = make_elm_or_print_err(\"filesink\", \"filesink\", \"File Sink\")\n        filesink.set_property(\"location\", output_filename)\n    else:  # Fake sink, no save\n        fakesink = make_elm_or_print_err(\"fakesink\", \"fakesink\", \"Fake Sink\")\n\n    # Add elements to the pipeline\n    if camera_input:\n        pipeline.add(source)\n        pipeline.add(caps_camera)\n        pipeline.add(vidconvsrc)\n        pipeline.add(nvvidconvsrc)\n        pipeline.add(caps_vidconvsrc)\n    else:\n        pipeline.add(source_bin)\n    pipeline.add(streammux)\n    pipeline.add(pgie)\n\n    pipeline.add(convert_pre_osd)\n    pipeline.add(nvosd)\n    pipeline.add(queue)\n    pipeline.add(convert_post_osd)\n    pipeline.add(capsfilter)\n    pipeline.add(encoder)\n    pipeline.add(splitter_file_udp)\n\n    if output_filename is not None:\n        pipeline.add(queue_file)\n        pipeline.add(codeparser)\n        pipeline.add(container)\n        pipeline.add(filesink)\n    else:\n        pipeline.add(fakesink)\n\n    # Output to UDP\n    pipeline.add(queue_udp)\n    pipeline.add(rtppay)\n    pipeline.add(multiudpsink)\n\n    print(\"Linking elements in the Pipeline \\n\")\n\n    # Pipeline Links\n    if camera_input:\n        source.link(caps_camera)\n        caps_camera.link(vidconvsrc)\n        vidconvsrc.link(nvvidconvsrc)\n        nvvidconvsrc.link(caps_vidconvsrc)\n        srcpad = caps_vidconvsrc.get_static_pad(\"src\")\n    else:\n        srcpad = source_bin.get_static_pad(\"src\")\n    sinkpad = streammux.get_request_pad(\"sink_0\")\n    if not srcpad or not sinkpad:\n        print(\"Unable to get file source or mux sink pads\", error=True)\n    srcpad.link(sinkpad)\n    streammux.link(pgie)\n    pgie.link(convert_pre_osd)\n    convert_pre_osd.link(nvosd)\n    nvosd.link(queue)\n    queue.link(convert_post_osd)\n    convert_post_osd.link(capsfilter)\n    capsfilter.link(encoder)\n    encoder.link(splitter_file_udp)\n\n    # Split stream to file and rtsp\n    tee_file = splitter_file_udp.get_request_pad(\"src_%u\")\n    tee_udp = splitter_file_udp.get_request_pad(\"src_%u\")\n\n    # Output to File or fake sinks\n    if output_filename is not None:\n        tee_file.link(queue_file.get_static_pad(\"sink\"))\n        queue_file.link(codeparser)\n        codeparser.link(container)\n        container.link(filesink)\n    else:\n        tee_file.link(fakesink.get_static_pad(\"sink\"))\n\n    # Output to UDP\n    
tee_udp.link(queue_udp.get_static_pad(\"sink\"))\n    queue_udp.link(rtppay)\n    rtppay.link(multiudpsink)\n\n    # Let's add a probe to get informed of the generated metadata. We add it to\n    # the sink pad of the osd element, since by that time the buffer will\n    # already contain all the metadata.\n    osdsinkpad = nvosd.get_static_pad(\"sink\")\n    if not osdsinkpad:\n        print(\"Unable to get sink pad of nvosd\", error=True)\n\n    cb_args = (face_processor, e_ready)\n    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, cb_buffer_probe, cb_args)\n\n    # GLib loop required for RTSP server\n    g_loop = GLib.MainLoop()\n    g_context = g_loop.get_context()\n\n    # GStreamer message bus\n    bus = pipeline.get_bus()\n\n    if e_external_interrupt is None:\n        # Use threading instead of mp.Event() for sigint_handler, see:\n        # https://bugs.python.org/issue41606\n        e_interrupt = threading.Event()\n        signal.signal(signal.SIGINT, sigint_handler)\n        print(\"[green bold]Press Ctrl+C to stop pipeline[/green bold]\")\n    else:\n        # If there's an external interrupt, don't capture SIGINT\n        e_interrupt = e_external_interrupt\n\n    # Start playback and listen to events\n    pipeline.set_state(Gst.State.PLAYING)\n\n    # After setting pipeline to PLAYING, stop it even on exceptions\n    try:\n        time_start_playing = time.time()\n\n        # Timer to add statistics to queue\n        if stats_queue is not None:\n            cb_args = stats_period, stats_queue, face_processor\n            GLib.timeout_add_seconds(stats_period, cb_add_statistics, cb_args)\n\n        # Periodic gloop interrupt (see utils.glib_cb_restart)\n        t_check = 100\n        GLib.timeout_add(t_check, glib_cb_restart, t_check)\n\n        # Custom event loop\n        running = True\n        while running:\n            g_context.iteration(may_block=True)\n\n            message = bus.pop()\n            if message is not None:\n                t = message.type\n\n                if t == Gst.MessageType.EOS:\n                    print(\"End-of-stream\\n\")\n                    running = False\n                elif t == Gst.MessageType.WARNING:\n                    err, debug = message.parse_warning()\n                    print(f\"{err}: {debug}\", warning=True)\n                elif t == Gst.MessageType.ERROR:\n                    err, debug = message.parse_error()\n                    print(f\"{err}: {debug}\", error=True)\n                    show_troubleshooting()\n                    running = False\n            if e_interrupt.is_set():\n                # Send EOS to container to generate a valid mp4 file\n                if output_filename is not None:\n                    container.send_event(Gst.Event.new_eos())\n                    multiudpsink.send_event(Gst.Event.new_eos())\n                else:\n                    pipeline.send_event(Gst.Event.new_eos())  # fakesink EOS won't work\n\n        end_time = time.time()\n        print(\"Inference main loop ending.\")\n        pipeline.set_state(Gst.State.NULL)\n\n        # Profiling display\n        if start_time is not None and end_time is not None:\n            total_time = end_time - start_time\n            total_frames = frame_number\n            inference_frames = total_frames // (skip_inference + 1)\n            print()\n            print(f\"[bold yellow] ---- Profiling ---- [/bold yellow]\")\n            print(f\"Inference frames: {inference_frames} | Processed frames: {total_frames}\")\n            
print(f\"Time from time_start_playing: {end_time - time_start_playing:.2f} seconds\")\n            print(f\"Total time skipping first inference: {total_time:.2f} seconds\")\n            print(f\"Avg. time/frame: {total_time/total_frames:.4f} secs\")\n            print(f\"[bold yellow]FPS: {total_frames/total_time:.1f} frames/second[/bold yellow]\\n\")\n            if skip_inference != 0:\n                print(\n                    \"[red]NOTE: FPS calculated skipping inference every\"\n                    f\" interval={skip_inference} frames[/red]\"\n                )\n        if output_filename is not None:\n            print(f\"Output file saved: [green bold]{output_filename}[/green bold]\")\n    except:\n        console.print_exception()\n        pipeline.set_state(Gst.State.NULL)\n\n\nif __name__ == \"__main__\":\n    print_config_overrides()\n    # Check input arguments\n    output_filename = None\n    if len(sys.argv) > 1:\n        input_filename = sys.argv[1]\n        print(f\"Provided input source: {input_filename}\")\n        if len(sys.argv) > 2:\n            output_filename = sys.argv[2]\n            print(f\"Save output file: [green]{output_filename}[/green]\")\n    else:\n        input_filename = config[\"maskcam\"][\"default-input\"]\n        print(f\"Using input from config file: {input_filename}\")\n\n    sys.exit(\n        main(\n            config=config,\n            input_filename=input_filename,\n            output_filename=output_filename,\n        )\n    )\n"
  },
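The probe code in maskcam_inference.py classifies each tracked person by accumulating signed votes: +1 for a "mask" detection, -1 for "no mask", and the person is only labeled once `abs(votes)` reaches `min_votes`. Below is a minimal, self-contained sketch of that voting scheme; the class and names are illustrative (the real logic lives in `FaceMaskProcessor`, defined elsewhere in the package), and `min_votes` is lowered to 2 to keep the demo short.

```python
# Illustrative re-implementation of the per-person mask voting used above.
from threading import Lock


class VoteCounter:
    def __init__(self, min_votes=2):
        self.min_votes = min_votes  # votes needed before labeling a person
        self.people_votes = {}      # person_id -> signed vote count
        self.lock = Lock()          # stats are also read from a timer callback

    def add_vote(self, person_id, has_mask):
        with self.lock:
            self.people_votes.setdefault(person_id, 0)
            self.people_votes[person_id] += 1 if has_mask else -1

    def get_label(self, person_id):
        votes = self.people_votes.get(person_id, 0)
        if abs(votes) >= self.min_votes:
            return "mask" if votes > 0 else "no mask"
        return "not visible"  # not enough confident detections yet


counter = VoteCounter(min_votes=2)
for has_mask in (True, True, False):
    counter.add_vote(person_id=7, has_mask=has_mask)
print(counter.get_label(7))  # "not visible": votes == +1, still below min_votes
```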
  {
    "path": "maskcam/maskcam_streaming.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport gi\nimport pyds\nimport sys\nimport time\nimport signal\nimport platform\nimport threading\nimport multiprocessing as mp\nfrom datetime import datetime\n\ngi.require_version(\"Gst\", \"1.0\")\ngi.require_version(\"GstRtspServer\", \"1.0\")\nfrom gi.repository import GLib, Gst, GstRtspServer\n\nfrom .config import config, print_config_overrides\nfrom .prints import print_streaming as print\nfrom .utils import get_ip_address, glib_cb_restart, get_streaming_address\nfrom .common import CODEC_MP4, CODEC_H264, CODEC_H265, CONFIG_FILE\n\ne_interrupt = None\n\n\ndef sigint_handler(sig, frame):\n    # This function is not used if e_external_interrupt is provided\n    print(\"[red]Ctrl+C pressed. 
Interrupting streaming...[/red]\")\n    e_interrupt.set()\n\n\ndef main(config, e_external_interrupt: mp.Event = None):\n    global e_interrupt\n    udp_port = int(config[\"maskcam\"][\"udp-port-streaming\"])\n    codec = config[\"maskcam\"][\"codec\"]\n    # Streaming address: rtsp://<jetson-ip>:<rtsp-port>/<rtsp-address>\n    rtsp_port = int(config[\"maskcam\"][\"streaming-port\"])\n    rtsp_address = config[\"maskcam\"][\"streaming-path\"]\n    streaming_clock_rate = int(config[\"maskcam\"][\"streaming-clock-rate\"])\n\n    # udp_capabilities = f\"application/x-rtp,media=video,encoding-name={codec},payload=96\"\n\n    print(f\"Codec: {codec}\")\n\n    # Standard GStreamer initialization\n    Gst.init(None)\n\n    # Start streaming\n    server = GstRtspServer.RTSPServer.new()\n    server.props.service = str(rtsp_port)\n    server.attach(None)\n\n    factory = GstRtspServer.RTSPMediaFactory.new()\n    factory.set_launch(\n        f\"( udpsrc name=pay0 port={udp_port} buffer-size=524288\"\n        f' caps=\"application/x-rtp, media=video, clock-rate={streaming_clock_rate},'\n        f' encoding-name=(string){codec}, payload=96 \" )'\n    )\n    factory.set_shared(True)\n    server.get_mount_points().add_factory(rtsp_address, factory)\n\n    streaming_address = get_streaming_address(get_ip_address(), rtsp_port, rtsp_address)\n    print(f\"\\n\\n[green bold]Streaming[/green bold] at {streaming_address}\\n\\n\")\n\n    # GLib loop required for RTSP server\n    g_loop = GLib.MainLoop()\n    g_context = g_loop.get_context()\n\n    if e_external_interrupt is None:\n        # Use threading instead of mp.Event() for sigint_handler, see:\n        # https://bugs.python.org/issue41606\n        e_interrupt = threading.Event()\n        signal.signal(signal.SIGINT, sigint_handler)\n        print(\"[green bold]Press Ctrl+C to stop pipeline[/green bold]\")\n    else:\n        # If there's an external interrupt, don't capture SIGINT\n        e_interrupt = e_external_interrupt\n\n    # Periodic gloop interrupt (see utils.glib_cb_restart)\n    t_check = 100\n    GLib.timeout_add(t_check, glib_cb_restart, t_check)\n\n    while not e_interrupt.is_set():\n        g_context.iteration(may_block=True)\n\n    print(\"Ending streaming\")\n\n\nif __name__ == \"__main__\":\n\n    # Print any config override by env variables to avoid confusions\n    print_config_overrides()\n    main(config)\n"
  },
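maskcam_streaming.py relays the local UDP stream over RTSP at rtsp://&lt;device-address&gt;:&lt;streaming-port&gt;&lt;streaming-path&gt;. A client-side viewing sketch, assuming an OpenCV build with RTSP (FFmpeg or GStreamer) support; the IP address below is a placeholder, while the port and path match the defaults in maskcam_config.txt:

```python
# Minimal RTSP viewer for the stream served above.
import cv2

cap = cv2.VideoCapture("rtsp://192.168.0.10:8554/maskcam")  # placeholder address
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break  # stream ended or connection dropped
    cv2.imshow("maskcam", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
cap.release()
cv2.destroyAllWindows()
```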
  {
    "path": "maskcam/mqtt_commander.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport sys\nimport json\nimport time\nfrom rich import print\n\nfrom .mqtt_common import mqtt_send_msg, mqtt_connect_broker\nfrom .mqtt_common import MQTT_BROKER_IP, MQTT_BROKER_PORT, MQTT_DEVICE_NAME\nfrom .mqtt_common import (\n    MQTT_TOPIC_ALERTS,\n    MQTT_TOPIC_FILES,\n    MQTT_TOPIC_HELLO,\n    MQTT_TOPIC_STATS,\n    MQTT_TOPIC_COMMANDS,\n)\nfrom .common import (\n    CMD_FILE_SAVE,\n    CMD_STREAMING_START,\n    CMD_STREAMING_STOP,\n    CMD_INFERENCE_RESTART,\n)\n\n\ndef show_message(mqtt_client, userdata, message):\n    print(f\"Message received in topic: [yellow]{message.topic}[/yellow]\")\n    print(json.loads(message.payload.decode()))\n\n\nif MQTT_BROKER_IP is None or MQTT_DEVICE_NAME is None:\n    print(\n        \"\\n[red]MQTT is DISABLED[/red]\"\n        \" since MQTT_BROKER_IP or MQTT_DEVICE_NAME env vars are not defined\\n\"\n    )\n    sys.exit(0)\n\n# Subscribe to some topics\nprint(\"\\n[blue]Available topics:[/blue]\")\nprint(MQTT_TOPIC_ALERTS)\nprint(MQTT_TOPIC_FILES)\nprint(MQTT_TOPIC_HELLO)\nprint(MQTT_TOPIC_STATS)\nprint(MQTT_TOPIC_COMMANDS)\ntopics_subscribe = []\nwhile True:\n    topic = input(\"\\nSubscribe to topic (empty to continue): \")\n    if topic == \"\":\n        break\n    topics_subscribe.append((topic, 2))  # Use qos=2\n\n# Connect to client and subscribe\nmqtt_client = mqtt_connect_broker(\n    client_id=\"commander\",\n    broker_ip=MQTT_BROKER_IP,\n    broker_port=MQTT_BROKER_PORT,\n    subscribe_to=topics_subscribe,\n)\nmqtt_client.on_message = show_message\n\ntime.sleep(1)  # Wait to print connection messages\n# Send commands\nprint(\"\\n[blue]Available commands:[/blue]\")\nprint(CMD_FILE_SAVE)\nprint(CMD_STREAMING_START)\nprint(CMD_STREAMING_STOP)\nprint(CMD_INFERENCE_RESTART)\nwhile True:\n    cmd = input(\"\\nSend command to device (q to exit):\\n\")\n    if cmd == \"q\":\n        break\n    payload = {\"device_id\": MQTT_DEVICE_NAME, \"command\": cmd}\n    mqtt_send_msg(mqtt_client, MQTT_TOPIC_COMMANDS, payload)\n"
  },
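Commands are plain JSON payloads published to the "commands" topic. A non-interactive, one-shot equivalent of the prompt loop above, using paho-mqtt's publish helper; the broker IP, device name, and command string are placeholders (real command strings come from the constants in maskcam.common):

```python
# One-shot command publish, mirroring the payload mqtt_commander.py sends.
import json
from paho.mqtt import publish

payload = {"device_id": "my-jetson", "command": "streaming_start"}
publish.single(
    "commands",              # MQTT_TOPIC_COMMANDS
    json.dumps(payload),
    qos=2,                   # same QoS the commander subscribes with
    hostname="192.168.0.2",  # MQTT_BROKER_IP
    port=1883,               # MQTT_BROKER_PORT
)
```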
  {
    "path": "maskcam/mqtt_common.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport json\nfrom multiprocessing import Queue\nfrom typing import Callable, List\nfrom paho.mqtt import client as paho_mqtt_client\n\nfrom .config import config\nfrom .prints import print_mqtt as print\n\n# MQTT topics\nMQTT_TOPIC_HELLO = \"hello\"\nMQTT_TOPIC_UPDATE = \"device-status\"\nMQTT_TOPIC_STATS = \"receive-from-jetson\"\nMQTT_TOPIC_ALERTS = \"alerts\"\nMQTT_TOPIC_FILES = \"video-files\"\nMQTT_TOPIC_COMMANDS = \"commands\"\n\nconfig_broker_ip = config[\"mqtt\"][\"mqtt-broker-ip\"].strip()\nconfig_device_name = config[\"mqtt\"][\"mqtt-device-name\"].strip()\n\n# Must come defined or MQTT gets disabled\nMQTT_BROKER_IP = None\nif config_broker_ip and config_broker_ip != \"0\":\n    MQTT_BROKER_IP = config_broker_ip\n\nMQTT_DEVICE_NAME = None\nif config_device_name and config_device_name != \"0\":\n    MQTT_DEVICE_NAME = config_device_name\n\nMQTT_BROKER_PORT = int(config[\"mqtt\"][\"mqtt-broker-port\"])\nMQTT_DEVICE_DESCRIPTION = config[\"mqtt\"][\"mqtt-device-description\"]\n\nmqtt_msg_queue = Queue(maxsize=100)  # 100 mqtt messages stored max\n\n\ndef mqtt_send_queue(mqtt_client):\n    success = True\n    while not mqtt_msg_queue.empty() and success:\n        q_msg = mqtt_msg_queue.get_nowait()\n        print(f\"Sending enqueued message to topic: {q_msg['topic']}\")\n        success = mqtt_send_msg(mqtt_client, q_msg[\"topic\"], q_msg[\"message\"])\n    return success\n\n\ndef mqtt_connect_broker(\n    client_id: str,\n    broker_ip: str,\n    broker_port: int,\n    subscribe_to: List[List] = None,\n    cb_success: Callable = None,\n) -> paho_mqtt_client:\n    def cb_on_connect(client, userdata, flags, code):\n        if code == 0:\n            print(\"[green]Connected to MQTT Broker[/green]\")\n            if subscribe_to:\n                print(\"Subscribing to topics:\")\n                print(subscribe_to)\n                client.subscribe(subscribe_to)  # Always re-suscribe after reconnecting\n            if cb_success is not None:\n                cb_success(client)\n            if not mqtt_send_queue(client):\n                print(f\"Failed to send MQTT message queue after connecting\", warning=True)\n        else:\n            print(f\"Failed 
to connect to MQTT[/red], return code {code}\", warning=True)\n\n    def cb_on_disconnect(client, userdata, code):\n        print(f\"Disconnected from MQTT Broker, code: {code}\")\n\n    client = paho_mqtt_client.Client(client_id)\n    client.on_connect = cb_on_connect\n    client.on_disconnect = cb_on_disconnect\n    client.connect(broker_ip, broker_port)\n    client.loop_start()\n    return client\n\n\ndef mqtt_send_msg(mqtt_client, topic, message, enqueue=True):\n    if mqtt_client is None:\n        print(f\"MQTT not connected. Skipping message to topic: {topic}\")\n        return False\n\n    # Check previous enqueued msgs\n    mqtt_send_queue(mqtt_client)\n\n    result = mqtt_client.publish(topic, json.dumps(message))\n    if result[0] == 0:\n        print(f\"{topic} | MQTT message [green]SENT[/green]\")\n        print(message)\n        return True\n    else:\n        if enqueue:\n            if not mqtt_msg_queue.full():\n                print(f\"{topic} | MQTT message [yellow]ENQUEUED[/yellow]\")\n                mqtt_msg_queue.put_nowait({\"topic\": topic, \"message\": message})\n            else:\n                print(f\"{topic} | MQTT message [red]DROPPED: FULL QUEUE[/red]\", error=True)\n        else:\n            print(f\"{topic} | MQTT message [yellow]DISCARDED[/yellow]\", warning=True)\n        return False\n"
  },
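A usage sketch for the helpers above: connect once, then publish; if a publish fails, the message is enqueued (up to 100 entries) and flushed by mqtt_send_queue() on the next successful send or reconnect. The broker details are placeholders, and the module expects MQTT to be enabled via its config/env variables:

```python
# Sketch: connecting and publishing with the helpers defined in mqtt_common.py.
from maskcam.mqtt_common import mqtt_connect_broker, mqtt_send_msg, MQTT_TOPIC_HELLO

client = mqtt_connect_broker(
    client_id="example-device",
    broker_ip="192.168.0.2",         # placeholder broker address
    broker_port=1883,
    subscribe_to=[("commands", 2)],  # re-subscribed automatically on reconnect
)
# Returns True if published; otherwise the message is enqueued and retried
# on the next successful send (see mqtt_send_queue above).
mqtt_send_msg(client, MQTT_TOPIC_HELLO, {"device_id": "example-device"})
```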
  {
    "path": "maskcam/prints.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport logging\nfrom rich.logging import RichHandler\n\nlogging.basicConfig(\n    level=\"NOTSET\",\n    format=\"%(message)s\",\n    datefmt=\"|\",  # Not needed w/balena, use [%X] otherwise\n    handlers=[RichHandler(markup=True)],\n)\n\nlog = logging.getLogger(\"rich\")\n\n\ndef print_process(\n    color, process_name, *args, error=False, warning=False, exception=False, **kwargs\n):\n    msg = \" \".join([str(arg) for arg in args])  # Concatenate all incoming strings or objects\n    rich_msg = f\"[{color}]{process_name}[/{color}] | {msg}\"\n    if error:\n        log.error(rich_msg)\n    elif warning:\n        log.warning(rich_msg)\n    elif exception:\n        log.exception(rich_msg)\n    else:\n        log.info(rich_msg)\n\n\ndef print_run(*args, **kwargs):\n    print_process(\"blue\", \"maskcam-run\", *args, **kwargs)\n\n\ndef print_fileserver(*args, **kwargs):\n    print_process(\"dark_violet\", \"file-server\", *args, **kwargs)\n\n\ndef print_filesave(*args, **kwargs):\n    print_process(\"dark_magenta\", \"file-save\", *args, **kwargs)\n\n\ndef print_streaming(*args, **kwargs):\n    print_process(\"dark_green\", \"streaming\", *args, **kwargs)\n\n\ndef print_inference(*args, **kwargs):\n    print_process(\"bright_yellow\", \"inference\", *args, **kwargs)\n\n\ndef print_mqtt(*args, **kwargs):\n    print_process(\"bright_green\", \"mqtt\", *args, **kwargs)\n\n\ndef print_common(*args, **kwargs):\n    print_process(\"white\", \"common\", *args, **kwargs)\n"
  },
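Each process in the project imports its own wrapper from prints.py and shadows the builtin print, so every log line gets a color-coded process-name prefix and flows through the rich logging handler. For example:

```python
# Example of the intended usage pattern (each module aliases its own wrapper).
from maskcam.prints import print_mqtt as print

print("broker connected")                # INFO level:  mqtt | broker connected
print("queue is full", warning=True)     # WARNING level
print("connection refused", error=True)  # ERROR level
```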
  {
    "path": "maskcam/utils.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom .config import config\nfrom gi.repository import GLib\n\nADDRESS_UNKNOWN_LABEL = \"<device-address-not-configured>\"\n\n\ndef get_ip_address():\n    result_value = config[\"maskcam\"][\"device-address\"].strip()\n    if not result_value or result_value == \"0\":\n        result_value = ADDRESS_UNKNOWN_LABEL\n    return result_value\n\n\ndef get_streaming_address(host_address, rtsp_port, rtsp_path):\n    return f\"rtsp://{host_address}:{rtsp_port}{rtsp_path}\"\n\n\ndef format_tdelta(time_delta):\n    # Format to show timedelta objects as string\n    if time_delta is None:\n        return \"N/A\"\n    return f\"{time_delta}\".split(\".\")[0]  # Remove nanoseconds\n\n\ndef glib_cb_restart(t_restart):\n    # Timer to avoid GLoop locking infinitely\n    # We want to run g_context.iteration(may_block=True)\n    # since may_block=False will use high CPU,\n    # and adding sleeps lags event processing.\n    # But we want to check periodically for other events\n    GLib.timeout_add(t_restart, glib_cb_restart, t_restart)\n\n\ndef load_udp_ports_filesaving(config, udp_ports_pool):\n    for port in config[\"maskcam\"][\"udp-ports-filesave\"].split(\",\"):\n        udp_ports_pool.add(int(port))\n    return udp_ports_pool\n"
  },
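load_udp_ports_filesaving() seeds the UDP port pool that maskcam_run.py draws from: each file-save process pops a free port and adds it back when it finishes. A standalone sketch of that lifecycle, using a stand-in dict in place of the parsed config file:

```python
# Stand-in for the configparser object; mirrors udp-ports-filesave in maskcam_config.txt.
config = {"maskcam": {"udp-ports-filesave": "5401,5402"}}

udp_ports_pool = set()
for port in config["maskcam"]["udp-ports-filesave"].split(","):
    udp_ports_pool.add(int(port))  # same as load_udp_ports_filesaving()

port = udp_ports_pool.pop()   # allocate a free port for a file-save process
print(f"Allocating UDP port: {port}")
udp_ports_pool.add(port)      # release it once the process terminates
```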
  {
    "path": "maskcam_config.txt",
    "content": "\n# NOTE: Some values might be overriden via ENV vars (check maskcam/config.py)\n\n[face-processor]\n# Detections with score below this threshold will be discarded\ndetection-threshold=0.1\n# Only vote mask/no_mask when detection score is above this\nvoting-threshold=0.75\n# Smaller detections (in pixels) will be discarded\nmin-face-size=8\n\n# Disable tracker to draw raw detections and set thresholds above\ndisable-tracker=0\n\n[mqtt]\n# These are just placeholders, to enable MQTT define these env variables:\n# MQTT_BROKER_IP and MQTT_DEVICE_NAME\nmqtt-broker-ip=0\nmqtt-device-name=0\n\nmqtt-broker-port=1883\nmqtt-device-description=MaskCam @ Jetson Nano\n\n[maskcam]\n# Alert conditions\n# Minimum people to even calculate no-mask-fraction\nalert-min-visible-people=1\n# More than this fraction of people without mask will raise an alarm\nalert-no-mask-fraction=0.25\n# More than this people detected will raise alarm despite no-mask-fraction\nalert-max-total-people=10\n\n# Time to send statistics in seconds. Set smaller than fileserver-video-period\nstatistics-period=15\n\n# Time (in seconds) to restart statistics (and the whole Deepstream inference process)\n# Set to 0 to disable / 24hs = 86400 seconds\ntimeout-inference-restart=86400\ninference-log-interval=300\n\n# Other valid inputs:\n#  - CSI cameras like RaspiCam:\n#    -> argus://0\n#  - Any file:\n#    -> file:///absolute/path/to/file.mp4\ndefault-input=v4l2:///dev/video0\n\n# Output/streaming video resolution. 1024x576 keeps 4k aspect ratio of 1.777\noutput-video-width=1024\noutput-video-height=576\n\n# Run utils/gst_capabilities.sh and find video/x-raw entries\ncamera-framerate=30\n\n# Only used for argus:// inputs\ncamera-flip-method=0\n\n# Auto-calculate nvinfer's `interval` based on `camera-framerate` and `inference-max-fps`\n# to avoid delaying the pipeline. This will override the fixed `interval` parameter below\n# E.g: if framerate=30 and max-fps=14,\n#      -> will set interval=2 so that inference runs only 1/3 of incoming frames\ninference-interval-auto=1\n# Set this value to the actual FPS bottleneck of the model. 
Only used if inference-interval-auto.\n# e.g: run the model on a video file (instead of live camera source) to determine model's FPS on your device\ninference-max-fps=14\n\nudp-port-streaming=5400\n# 2 ports for overlapping file-save processes\nudp-ports-filesave=5401,5402\n\nstreaming-start-default=1\nstreaming-port=8554\nstreaming-path=/maskcam\nstreaming-clock-rate=90000\n# Supported: MP4, H264, H265\n# Recommended H264 for stability on video save\ncodec=H264\n\n# Sequentially saving videos\nfileserver-enabled=1\nfileserver-port=8080\nfileserver-video-period=30\nfileserver-video-duration=35\nfileserver-force-save=0\nfileserver-ram-dir=/dev/shm\n# Use /tmp/* to clean saved videos on system reboot\nfileserver-hdd-dir=/tmp/saved_videos\n\n# IP or domain address that this device will show in info messages (logs and web frontend, for streaming and file downloading)\n# Recommended: use env variable MASKCAM_DEVICE_ADDRESS to set this\ndevice-address=0\n\n[property]\ninterval=0\ngpu-id=0\n# Was: \nnet-scale-factor=0.0039215697906911373\n#0=RGB, 1=BGR\nmodel-color-format=0\n\n# YOLOv4\n# model-engine-file=yolo/facemask_y4tiny_1024_608_fp16.trt\n# model-engine-file=yolo/maskcam_y4t_1184_672_fp16.trt\n# model-engine-file=yolo/maskcam_y4t_1120_640_fp16.trt\nmodel-engine-file=yolo/maskcam_y4t_1024_608_fp16.trt\nlabelfile-path=yolo/data/obj.names\ncustom-lib-path=deepstream_plugin_yolov4/libnvdsinfer_custom_impl_Yolo.so\n\n# Detectnet_v2\n# tlt-encoded-model=detectnet_v2/resnet18_detector.etlt\n# tlt-model-key=tlt_encode\n# labelfile-path=detectnet_v2/labels.txt\n# input-dims=3;544;960;0 # where c = number of channels, h = height of the model input, w = width of model input, 0: implies CHW format.\n# uff-input-blob-name=input_1\n# output-blob-names=output_cov/Sigmoid;output_bbox/BiasAdd\n\nnum-detected-classes=4\n\n## 0=FP32, 1=INT8, 2=FP16 mode\nnetwork-mode=2\ngie-unique-id=1\nnetwork-type=0\n# is-classifier=0\n## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering)\n# Default: 2\ncluster-mode=2\n# Skip inference these frames\nmaintain-aspect-ratio=0\nparse-bbox-func-name=NvDsInferParseCustomYoloV4\nengine-create-func-name=NvDsInferYoloCudaEngineGet\nscaling-filter=1\nscaling-compute-hw=1\n#output-blob-names=2012\n\n# Async mode doesn't make sense with our custom python tracker\nclassifier-async-mode=0\n\n\n[class-attrs-all]\nnms-iou-threshold=0.2\n\n# Default: 0.4\npre-cluster-threshold=0.4\n"
  },
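The inference-interval-auto rule described in the config comments reduces to one integer division, as implemented in maskcam_inference.py. A worked example with the default values:

```python
camera_framerate = 30    # camera-framerate
inference_max_fps = 14   # inference-max-fps

# nvinfer `interval` = number of frames skipped between inferences
interval = camera_framerate // inference_max_fps
print(interval)  # 2 -> inference runs on 1 of every (interval + 1) = 3 frames

# The custom tracker bridges the skipped frames:
tracker_period = interval + 1  # matches face_tracker_period in maskcam_inference.py
```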
  {
    "path": "maskcam_run.py",
    "content": "#!/usr/bin/env python3\n\n################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport sys\nimport json\nimport shutil\nimport signal\nimport threading\nimport multiprocessing as mp\n\n# Avoids random hangs in child processes (https://pythonspeed.com/articles/python-multiprocessing/)\nmp.set_start_method(\"spawn\")  # noqa\n\nfrom rich.console import Console\nfrom datetime import datetime, timedelta\n\nfrom maskcam.prints import print_run as print\nfrom maskcam.config import config, print_config_overrides\nfrom maskcam.common import USBCAM_PROTOCOL, RASPICAM_PROTOCOL\nfrom maskcam.common import (\n    CMD_FILE_SAVE,\n    CMD_STREAMING_START,\n    CMD_STREAMING_STOP,\n    CMD_INFERENCE_RESTART,\n    CMD_FILESERVER_RESTART,\n    CMD_STATUS_REQUEST,\n)\nfrom maskcam.utils import (\n    get_ip_address,\n    ADDRESS_UNKNOWN_LABEL,\n    load_udp_ports_filesaving,\n    get_streaming_address,\n    format_tdelta,\n)\nfrom maskcam.mqtt_common import mqtt_connect_broker, mqtt_send_msg\nfrom maskcam.mqtt_common import (\n    MQTT_BROKER_IP,\n    MQTT_BROKER_PORT,\n    MQTT_DEVICE_DESCRIPTION,\n    MQTT_DEVICE_NAME,\n)\nfrom maskcam.mqtt_common import (\n    MQTT_TOPIC_ALERTS,\n    MQTT_TOPIC_FILES,\n    MQTT_TOPIC_HELLO,\n    MQTT_TOPIC_STATS,\n    MQTT_TOPIC_UPDATE,\n    MQTT_TOPIC_COMMANDS,\n)\nfrom maskcam.maskcam_inference import main as inference_main\nfrom maskcam.maskcam_filesave import main as filesave_main\nfrom maskcam.maskcam_fileserver import main as fileserver_main\nfrom maskcam.maskcam_streaming import main as streaming_main\n\n\nudp_ports_pool = set()\nconsole = Console()\n# Use threading.Event instead of mp.Event() for sigint_handler, see:\n# https://bugs.python.org/issue41606\ne_interrupt = threading.Event()\nq_commands = mp.Queue(maxsize=4)\nactive_filesave_processes = []\n\nP_INFERENCE = \"inference\"\nP_STREAMING = \"streaming\"\nP_FILESERVER = \"file-server\"\nP_FILESAVE_PREFIX = \"file-save-\"\n\nprocesses_info = {}\n\n\ndef sigint_handler(sig, frame):\n    print(\"[red]Ctrl+C pressed. 
Interrupting all processes...[/red]\")\n    e_interrupt.set()\n\n\ndef start_process(name, target_function, config, **kwargs):\n    e_interrupt_process = mp.Event()\n    process = mp.Process(\n        name=name,\n        target=target_function,\n        kwargs=dict(\n            e_external_interrupt=e_interrupt_process,\n            config=config,\n            **kwargs,\n        ),\n    )\n    processes_info[name] = {\"started\": datetime.now(), \"running\": True}\n    process.start()\n    print(f\"Process [yellow]{name}[/yellow] started with PID: {process.pid}\")\n    return process, e_interrupt_process\n\n\ndef terminate_process(name, process, e_interrupt_process, delete_info=False):\n    print(f\"Sending interrupt to {name} process\")\n    e_interrupt_process.set()\n    print(f\"Waiting for process [yellow]{name}[/yellow] to terminate...\")\n    process.join(timeout=10)\n    if process.is_alive():\n        print(\n            f\"[red]Forcing termination of process:[/red] [bold]{name}[/bold]\",\n            warning=True,\n        )\n        process.terminate()\n    if name in processes_info:\n        if delete_info:\n            del processes_info[name]  # Sequential processes, avoid filling memory\n        else:\n            processes_info[name].update({\"ended\": datetime.now(), \"running\": False})\n    print(f\"Process terminated: [yellow]{name}[/yellow]\\n\")\n\n\ndef new_command(command):\n    if q_commands.full():\n        print(f\"Command {command} IGNORED. Queue is full.\", error=True)\n        return\n    print(f\"Received command: [yellow]{command}[/yellow]\")\n    q_commands.put_nowait(command)\n\n\ndef mqtt_init(config):\n    if MQTT_BROKER_IP is None or MQTT_DEVICE_NAME is None:\n        print(\n            \"[red]MQTT is DISABLED[/red]\"\n            \" since MQTT_BROKER_IP or MQTT_DEVICE_NAME env vars are not defined\\n\",\n            warning=True,\n        )\n        mqtt_client = None\n    else:\n        print(f\"Connecting to MQTT server {MQTT_BROKER_IP}:{MQTT_BROKER_PORT}\")\n        print(f\"Device name: [green]{MQTT_DEVICE_NAME}[/green]\\n\\n\")\n        mqtt_client = mqtt_connect_broker(\n            client_id=MQTT_DEVICE_NAME,\n            broker_ip=MQTT_BROKER_IP,\n            broker_port=MQTT_BROKER_PORT,\n            subscribe_to=[(MQTT_TOPIC_COMMANDS, 2)],  # handles re-subscription\n            cb_success=mqtt_on_connect,\n        )\n        mqtt_client.on_message = mqtt_process_message\n\n        return mqtt_client\n\n\ndef mqtt_on_connect(mqtt_client):\n    mqtt_say_hello(mqtt_client)\n    mqtt_send_file_list(mqtt_client)\n\n\ndef mqtt_process_message(mqtt_client, userdata, message):\n    topic = message.topic\n    if topic == MQTT_TOPIC_COMMANDS:\n        payload = json.loads(message.payload.decode())\n\n        if payload[\"device_id\"] != MQTT_DEVICE_NAME:\n            return\n        command = payload[\"command\"]\n        new_command(command)\n\n\ndef mqtt_say_hello(mqtt_client):\n    return mqtt_send_msg(\n        mqtt_client,\n        MQTT_TOPIC_HELLO,\n        {\"device_id\": MQTT_DEVICE_NAME, \"description\": MQTT_DEVICE_DESCRIPTION},\n        enqueue=False,  # Will be resent on_connect\n    )\n\n\ndef mqtt_send_device_status(mqtt_client):\n    t_now = datetime.now()\n    device_address = get_ip_address()\n    is_valid_address = device_address != ADDRESS_UNKNOWN_LABEL\n    if P_INFERENCE in processes_info and processes_info[P_INFERENCE][\"running\"]:\n        inference_runtime = t_now - processes_info[P_INFERENCE][\"started\"]\n    else:\n        
inference_runtime = None\n    if P_FILESERVER in processes_info and processes_info[P_FILESERVER][\"running\"]:\n        fileserver_runtime = t_now - processes_info[P_FILESERVER][\"started\"]\n    else:\n        fileserver_runtime = None\n    if P_STREAMING in processes_info and processes_info[P_STREAMING][\"running\"]:\n        streaming_address = get_streaming_address(\n            device_address,\n            config[\"maskcam\"][\"streaming-port\"],\n            config[\"maskcam\"][\"streaming-path\"],\n        )\n    else:\n        streaming_address = \"N/A\"\n    total_fsave = len(active_filesave_processes)\n    keep_n = len([p for p in active_filesave_processes if p[\"flag_keep_file\"]])\n    return mqtt_send_msg(\n        mqtt_client,\n        MQTT_TOPIC_UPDATE,\n        {\n            \"device_id\": MQTT_DEVICE_NAME,\n            \"inference_runtime\": format_tdelta(inference_runtime),\n            \"fileserver_runtime\": format_tdelta(fileserver_runtime),\n            \"streaming_address\": streaming_address,\n            \"device_address\": device_address if is_valid_address else None,\n            \"save_current_files\": f\"{keep_n}/{total_fsave}\",\n            \"time\": f\"{t_now:%H:%M:%S}\",\n        },\n        enqueue=False,  # Only latest status is interesting\n    )\n\n\ndef mqtt_send_file_list(mqtt_client):\n    server_address = get_ip_address()\n    server_port = int(config[\"maskcam\"][\"fileserver-port\"])\n    try:\n        file_list = sorted(os.listdir(config[\"maskcam\"][\"fileserver-hdd-dir\"]))\n    except FileNotFoundError:  # directory not created\n        file_list = []\n    return mqtt_send_msg(\n        mqtt_client,\n        MQTT_TOPIC_FILES,\n        {\n            \"device_id\": MQTT_DEVICE_NAME,\n            \"file_server\": f\"http://{server_address}:{server_port}\",\n            \"file_list\": file_list,\n        },\n        enqueue=False,  # Will be resent on_connect or when something changes\n    )\n\n\ndef is_alert_condition(statistics, config):\n    # Thresholds config\n    max_total_people = int(config[\"maskcam\"][\"alert-max-total-people\"])\n    min_visible_people = int(config[\"maskcam\"][\"alert-min-visible-people\"])\n    max_no_mask = float(config[\"maskcam\"][\"alert-no-mask-fraction\"])\n\n    # Calculate visible people\n    without_mask = int(statistics[\"people_without_mask\"])\n    with_mask = int(statistics[\"people_with_mask\"])\n    visible_people = with_mask + without_mask\n    is_alert = False\n    if statistics[\"people_total\"] > max_total_people:\n        is_alert = True\n    elif visible_people >= min_visible_people:\n        no_mask_fraction = float(statistics[\"people_without_mask\"]) / visible_people\n        is_alert = no_mask_fraction > max_no_mask\n\n    print(f\"[yellow]ALERT condition: {is_alert}[/yellow]\")\n    return is_alert\n\n\ndef handle_statistics(mqtt_client, stats_queue, config, is_live_input):\n    while not stats_queue.empty():\n        statistics = stats_queue.get_nowait()\n\n        if is_live_input:\n            # Alert conditions detection\n            raise_alert = is_alert_condition(statistics, config)\n            if raise_alert:\n                flag_keep_current_files()\n\n            if mqtt_client is not None:\n                topic = MQTT_TOPIC_ALERTS if raise_alert else MQTT_TOPIC_STATS\n                message = {\"device_id\": MQTT_DEVICE_NAME, **statistics}\n                mqtt_send_msg(mqtt_client, topic, message, enqueue=True)\n\n\ndef allocate_free_udp_port():\n    new_port = 
udp_ports_pool.pop()\n    print(f\"Allocating UDP port: {new_port}\")\n    return new_port\n\n\ndef release_udp_port(port_number):\n    print(f\"Releasing UDP port: {port_number}\")\n    udp_ports_pool.add(port_number)\n\n\ndef handle_file_saving(\n    video_period, video_duration, ram_dir, hdd_dir, force_save, mqtt_client=None\n):\n    period = timedelta(seconds=video_period)\n    duration = timedelta(seconds=video_duration)\n    latest_start = None\n    latest_number = 0\n\n    # Handle termination of previous file-saving processes and move files RAM->HDD\n    terminated_idxs = []\n    for idx, active_process in enumerate(active_filesave_processes):\n        if datetime.now() - active_process[\"started\"] >= duration:\n            finish_filesave_process(active_process, hdd_dir, force_save, mqtt_client=mqtt_client)\n            terminated_idxs.append(idx)\n        if latest_start is None or active_process[\"started\"] > latest_start:\n            latest_start = active_process[\"started\"]\n            latest_number = active_process[\"number\"]\n\n    # Remove terminated processes from list in a separate loop\n    for idx in sorted(terminated_idxs, reverse=True):\n        del active_filesave_processes[idx]\n\n    # Start new file-saving process if time has elapsed\n    if latest_start is None or (datetime.now() - latest_start >= period):\n        print(\n            \"[green]Time to start a new video file [/green]\"\n            f\" (latest started at: {format_tdelta(latest_start)})\"\n        )\n        new_process_number = latest_number + 1\n        new_process_name = f\"{P_FILESAVE_PREFIX}{new_process_number}\"\n        new_filename = f\"{datetime.today().strftime('%Y%m%d_%H%M%S')}_{new_process_number}.mp4\"\n        new_filepath = f\"{ram_dir}/{new_filename}\"\n        new_udp_port = allocate_free_udp_port()\n        process_handler, e_interrupt_process = start_process(\n            new_process_name,\n            filesave_main,\n            config,\n            output_filename=new_filepath,\n            udp_port=new_udp_port,\n        )\n        active_filesave_processes.append(\n            dict(\n                number=new_process_number,\n                name=new_process_name,\n                filepath=new_filepath,\n                filename=new_filename,\n                started=datetime.now(),\n                process_handler=process_handler,\n                e_interrupt=e_interrupt_process,\n                flag_keep_file=False,\n                udp_port=new_udp_port,\n            )\n        )\n\n\ndef finish_filesave_process(active_process, hdd_dir, force_filesave, mqtt_client=None):\n    terminate_process(\n        active_process[\"name\"],\n        active_process[\"process_handler\"],\n        active_process[\"e_interrupt\"],\n        delete_info=True,\n    )\n    release_udp_port(active_process[\"udp_port\"])\n\n    # Move file to its definitive place if flagged, otherwise remove it\n    if active_process[\"flag_keep_file\"] or force_filesave:\n        definitive_filepath = f\"{hdd_dir}/{active_process['filename']}\"\n        print(f\"Force file saving: {bool(force_filesave)}\")\n        print(f\"Permanent video file created: [green]{definitive_filepath}[/green]\")\n        # Must use shutil here to move RAM->HDD\n        shutil.move(active_process[\"filepath\"], definitive_filepath)\n        # Send updated file list via MQTT (prints and ignores if mqtt_client is None)\n        mqtt_send_file_list(mqtt_client)\n    else:\n        print(f\"Removing RAM video file: 
{active_process['filepath']}\")\n        os.remove(active_process[\"filepath\"])\n\n\ndef flag_keep_current_files():\n    print(\"Request to [green]save current video files[/green]\")\n    for process in active_filesave_processes:\n        print(f\"Set flag to keep: [green]{process['filename']}[/green]\")\n        process[\"flag_keep_file\"] = True\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 2:\n        print(\n            \"\"\"Usage: python3 maskcam_run.py [ URI ]\n        Examples:\n        \\t$ python3 maskcam_run.py\n        \\t$ python3 maskcam_run.py file:///absolute/path/to/file.mp4\n        \\t$ python3 maskcam_run.py v4l2:///dev/video1\n        \\t$ python3 maskcam_run.py argus://0\n\n        Notes:\n        \\t - If no URI is provided, will use default-input defined in config_maskcam.txt\n        \\t - If a file:///path/file.mp4 is provided, the output will be ./output_file.mp4\n        \\t - If the input is a live camera, the output will be consecutive\n        \\t   video files under /dev/shm/date_time.mp4\n        \\t   according to the time interval defined in output-chunks-duration in config_maskcam.txt.\n        \"\"\"\n        )\n        sys.exit(0)\n    try:\n        # Print any ENV var config override to avoid confusions\n        print_config_overrides()\n\n        # Input source\n        if len(sys.argv) > 1:\n            input_filename = sys.argv[1]\n            print(f\"Provided input source: {input_filename}\")\n        else:\n            input_filename = config[\"maskcam\"][\"default-input\"]\n            print(f\"Using input from config file: {input_filename}\")\n\n        # Input type: file or live camera\n        is_usbcamera = USBCAM_PROTOCOL in input_filename\n        is_raspicamera = RASPICAM_PROTOCOL in input_filename\n        is_live_input = is_usbcamera or is_raspicamera\n\n        # Streaming enabled by default?\n        streaming_autostart = int(config[\"maskcam\"][\"streaming-start-default\"])\n\n        # Fileserver: sequentially save videos (only for camera input)\n        fileserver_enabled = is_live_input and int(config[\"maskcam\"][\"fileserver-enabled\"])\n        fileserver_period = int(config[\"maskcam\"][\"fileserver-video-period\"])\n        fileserver_duration = int(config[\"maskcam\"][\"fileserver-video-duration\"])\n        fileserver_force_save = int(config[\"maskcam\"][\"fileserver-force-save\"])\n        fileserver_ram_dir = config[\"maskcam\"][\"fileserver-ram-dir\"]\n        fileserver_hdd_dir = config[\"maskcam\"][\"fileserver-hdd-dir\"]\n\n        # Inference restart timeout\n        tout_inference_restart = int(config[\"maskcam\"][\"timeout-inference-restart\"])\n        if is_live_input and tout_inference_restart:\n            tout_inference_restart = timedelta(seconds=tout_inference_restart)\n        else:\n            tout_inference_restart = 0\n\n        # Filesave processes: load available ports\n        load_udp_ports_filesaving(config, udp_ports_pool)\n\n        # Should only have 1 element at a time unless this thread gets blocked\n        stats_queue = mp.Queue(maxsize=5)\n\n        # Init MQTT or set these to None\n        if is_live_input:\n            mqtt_client = mqtt_init(config)\n        else:\n            mqtt_client = None\n\n        # SIGINT handler (Ctrl+C)\n        signal.signal(signal.SIGINT, sigint_handler)\n        print(\"[green bold]Press Ctrl+C to stop all processes[/green bold]\")\n\n        process_inference = None\n        process_streaming = None\n        process_fileserver = None\n       
 e_inference_ready = mp.Event()\n\n        if fileserver_enabled:\n            process_fileserver, e_interrupt_fileserver = start_process(\n                P_FILESERVER, fileserver_main, config, directory=fileserver_hdd_dir\n            )\n\n        if streaming_autostart:\n            print(\"[yellow]Starting streaming (streaming-start-default is set)[/yellow]\")\n            new_command(CMD_STREAMING_START)\n\n        # Inference process: If input is a file, also saves file\n        output_filename = None if is_live_input else f\"output_{input_filename.split('/')[-1]}\"\n        process_inference, e_interrupt_inference = start_process(\n            P_INFERENCE,\n            inference_main,\n            config,\n            input_filename=input_filename,\n            output_filename=output_filename,\n            stats_queue=stats_queue,\n            e_ready=e_inference_ready,\n        )\n\n        while not e_interrupt.is_set():\n            # Send MQTT statistics, detect alarm events and request file-saving\n            handle_statistics(mqtt_client, stats_queue, config, is_live_input)\n\n            # Handle sequential file saving processes, only after inference process is ready\n            if e_inference_ready.is_set():\n                if fileserver_enabled and is_live_input:  # server can be enabled via MQTT\n                    handle_file_saving(\n                        fileserver_period,\n                        fileserver_duration,\n                        fileserver_ram_dir,\n                        fileserver_hdd_dir,\n                        fileserver_force_save,\n                        mqtt_client=mqtt_client,\n                    )\n\n            if not q_commands.empty():\n                command = q_commands.get_nowait()\n                reply_updated_status = False\n                print(f\"Processing command: [yellow]{command}[/yellow]\")\n                if command == CMD_STREAMING_START:\n                    if process_streaming is None or not process_streaming.is_alive():\n                        process_streaming, e_interrupt_streaming = start_process(\n                            P_STREAMING, streaming_main, config\n                        )\n                    reply_updated_status = True\n                elif command == CMD_STREAMING_STOP:\n                    if process_streaming is not None and process_streaming.is_alive():\n                        terminate_process(P_STREAMING, process_streaming, e_interrupt_streaming)\n                    reply_updated_status = True\n                elif command == CMD_INFERENCE_RESTART:\n                    if process_inference.is_alive():\n                        terminate_process(P_INFERENCE, process_inference, e_interrupt_inference)\n                    process_inference, e_interrupt_inference = start_process(\n                        P_INFERENCE,\n                        inference_main,\n                        config,\n                        input_filename=input_filename,\n                        output_filename=output_filename,\n                        stats_queue=stats_queue,\n                    )\n                    reply_updated_status = True\n                elif command == CMD_FILESERVER_RESTART:\n                    if process_fileserver is not None and process_fileserver.is_alive():\n                        terminate_process(P_FILESERVER, process_fileserver, e_interrupt_fileserver)\n                    process_fileserver, e_interrupt_fileserver = start_process(\n                        P_FILESERVER,\n       
                 fileserver_main,\n                        config,\n                        directory=fileserver_hdd_dir,\n                    )\n                    fileserver_enabled = True\n                    reply_updated_status = True\n                elif command == CMD_FILE_SAVE:\n                    flag_keep_current_files()\n                    reply_updated_status = True\n                elif command == CMD_STATUS_REQUEST:\n                    reply_updated_status = True\n                else:\n                    print(\"[red]Command not recognized[/red]\", error=True)\n\n                if reply_updated_status:\n                    mqtt_send_device_status(mqtt_client)\n            else:\n                e_interrupt.wait(timeout=0.1)\n\n            # Routine check: finish loop if the inference process is dead\n            if not process_inference.is_alive():\n                e_interrupt.set()\n\n            # Routine check: restart inference at given interval (only live_input)\n            if tout_inference_restart:\n                inference_runtime = datetime.now() - processes_info[P_INFERENCE][\"started\"]\n                if inference_runtime > tout_inference_restart:\n                    print(\n                        \"[yellow]Restarting inference due to timeout-inference-restart\"\n                        f\"(inference runtime: {format_tdelta(inference_runtime)})[/yellow]\"\n                    )\n                    new_command(CMD_INFERENCE_RESTART)\n\n    except:  # noqa\n        console.print_exception()\n\n    # Terminate all running processes, avoid breaking on any exception\n    for active_file_process in active_filesave_processes:\n        try:\n            finish_filesave_process(\n                active_file_process,\n                fileserver_hdd_dir,\n                fileserver_force_save,\n                mqtt_client=mqtt_client,\n            )\n        except:  # noqa\n            console.print_exception()\n    try:\n        if process_inference is not None and process_inference.is_alive():\n            terminate_process(P_INFERENCE, process_inference, e_interrupt_inference)\n    except:  # noqa\n        console.print_exception()\n    try:\n        if process_fileserver is not None and process_fileserver.is_alive():\n            terminate_process(P_FILESERVER, process_fileserver, e_interrupt_fileserver)\n    except:  # noqa\n        console.print_exception()\n    try:\n        if process_streaming is not None and process_streaming.is_alive():\n            terminate_process(P_STREAMING, process_streaming, e_interrupt_streaming)\n    except:  # noqa\n        console.print_exception()\n"
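The orchestration in maskcam_run.py follows one pattern throughout: every subprocess receives its own `mp.Event` as `e_external_interrupt`, and the parent sets that event, joins with a timeout, and only then force-terminates. A minimal, self-contained sketch of that pattern (the `worker_main` function is a hypothetical stand-in for `inference_main`, `streaming_main`, etc.):

```python
import multiprocessing as mp
import time


def worker_main(e_external_interrupt, config):
    # A well-behaved maskcam subprocess polls its interrupt event
    # and exits cleanly once the parent sets it.
    while not e_external_interrupt.is_set():
        time.sleep(0.1)  # real work (inference, streaming, ...) goes here


if __name__ == "__main__":
    e_interrupt_process = mp.Event()
    process = mp.Process(
        target=worker_main,
        kwargs=dict(e_external_interrupt=e_interrupt_process, config={}),
    )
    process.start()
    time.sleep(1.0)

    e_interrupt_process.set()  # ask the worker to stop...
    process.join(timeout=10)   # ...give it 10 seconds to comply...
    if process.is_alive():
        process.terminate()    # ...and force-kill only as a last resort
```

The graceful-then-forceful shutdown matters here because the file-saving workers hold video files open under the RAM directory; killing them mid-write could leave a corrupt .mp4.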
  },
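For reference, a hedged sketch of publishing a command that `mqtt_process_message()` above would accept: a JSON payload carrying `device_id` and `command`, published with QoS 2 to match the device's subscription (`subscribe_to=[(MQTT_TOPIC_COMMANDS, 2)]`). The broker address, topic string, and command string below are illustrative assumptions; the server-side config later in this repo names its device-bound topic "send-to-jetson".

```python
import json

import paho.mqtt.client as mqtt  # paho-mqtt is pinned in requirements.txt

client = mqtt.Client(client_id="commands-demo")
client.connect("192.168.0.10", 1883)  # assumed broker address
client.loop_start()  # network loop so the QoS 2 handshake can complete

info = client.publish(
    "send-to-jetson",  # assumed value of MQTT_TOPIC_COMMANDS
    json.dumps({"device_id": "my-jetson", "command": "streaming_start"}),
    qos=2,  # matches the QoS the device subscribes with
)
info.wait_for_publish()
client.loop_stop()
client.disconnect()
```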
  {
    "path": "requirements-dev.in",
    "content": "# dev deps\npip-tools\nblack\nflake8\njupyter\nipython\nipdb"
  },
  {
    "path": "requirements.in",
    "content": "# General deps\nnumpy\nscipy\nPyYAML\nipdb\n\n# MQTT\npaho-mqtt\n\n# Tracker\nnorfair\n\n# Colored prints and traceback\nrich\n"
  },
  {
    "path": "requirements.txt",
    "content": "# Versions frozen after `pip install -r requirements.in -c constraints.docker`\n# under balenalib/jetson-nano-ubuntu:bionic\nasn1crypto==0.24.0\nattrs==17.4.0\nbackcall==0.2.0\ncolorama==0.4.4\ncommonmark==0.9.1\ncryptography==2.1.4\ncycler==0.10.0\nCython==0.26.1\ndataclasses==0.7\ndecorator==4.1.2\nfilterpy==1.4.5\nidna==2.6\nipdb==0.13.4\nipython==7.16.1\nipython-genutils==0.2.0\njedi==0.18.0\nJetson.GPIO==2.0.8\njoblib==0.11\nkeyring==10.6.0\nkeyrings.alt==3.0\nmatplotlib==2.1.1\nnorfair==0.1.8\nnose==1.3.7\nnumpy==1.13.3\nolefile==0.45.1\nopencv-python==3.2.0\npaho-mqtt==1.5.1\nparso==0.8.1\npexpect==4.8.0\npickleshare==0.7.5\nPillow==5.1.0\npluggy==0.6.0\nprompt-toolkit==3.0.15\nptyprocess==0.7.0\npy==1.5.2\npycrypto==2.6.1\npyds==1.0.1\nPygments==2.7.4\npygobject==3.26.1\npyparsing==2.2.0\npytest==3.3.2\npython-dateutil==2.6.1\npytz==2018.3\npyxdg==0.25\nPyYAML==5.4.1\nrich==6.2.0\nscikit==0.19.1\nscikit-learn==0.19.1\nscipy==0.19.1\nSecretStorage==2.3.1\nsimplejson==3.13.2\nsix==1.11.0\ntensorrt==7.1.3.0\ntraitlets==4.3.3\ntyping-extensions==3.7.4.3\nwcwidth==0.2.5"
  },
  {
    "path": "server/backend/Dockerfile",
    "content": "FROM tiangolo/uvicorn-gunicorn-fastapi:python3.7\n\nCOPY ./app /app\nCOPY requirements.txt /app/requirements.txt\n\nENV PYTHONPATH=/app\nWORKDIR /app\n\nRUN python -m pip install --upgrade pip && pip install -r requirements.txt\n"
  },
  {
    "path": "server/backend/alembic.ini",
    "content": "# A generic, single database configuration.\n\n[alembic]\n# path to migration scripts\nscript_location = app/db/migrations\n\n# template used to generate migration files\n# file_template = %%(rev)s_%%(slug)s\n\n# timezone to use when rendering the date\n# within the migration file as well as the filename.\n# string value is passed to dateutil.tz.gettz()\n# leave blank for localtime\n# timezone =\n\n# max length of characters to apply to the\n# \"slug\" field\n# truncate_slug_length = 40\n\n# set to 'true' to run the environment during\n# the 'revision' command, regardless of autogenerate\n# revision_environment = false\n\n# set to 'true' to allow .pyc and .pyo files without\n# a source .py file to be detected as revisions in the\n# versions/ directory\n# sourceless = false\n\n# version location specification; this defaults\n# to alembic/versions.  When using multiple version\n# directories, initial revisions must be specified with --version-path\n# version_locations = %(here)s/bar %(here)s/bat alembic/versions\n\n# the output encoding used when revision files\n# are written from script.py.mako\n# output_encoding = utf-8\n\n\n[post_write_hooks]\n# post_write_hooks defines scripts or Python functions that are run\n# on newly generated revision scripts.  See the documentation for further\n# detail and examples\n\n# format using \"black\" - use the console_scripts runner, against the \"black\" entrypoint\n# hooks=black\n# black.type=console_scripts\n# black.entrypoint=black\n# black.options=-l 79\n\n# Logging configuration\n[loggers]\nkeys = root,sqlalchemy,alembic\n\n[handlers]\nkeys = console\n\n[formatters]\nkeys = generic\n\n[logger_root]\nlevel = WARN\nhandlers = console\nqualname =\n\n[logger_sqlalchemy]\nlevel = WARN\nhandlers =\nqualname = sqlalchemy.engine\n\n[logger_alembic]\nlevel = INFO\nhandlers =\nqualname = alembic\n\n[handler_console]\nclass = StreamHandler\nargs = (sys.stderr,)\nlevel = NOTSET\nformatter = generic\n\n[formatter_generic]\nformat = %(levelname)-5.5s [%(name)s] %(message)s\ndatefmt = %H:%M:%S\n"
  },
  {
    "path": "server/backend/app/api/__init__.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom .exceptions import NoItemFoundException, GenericException, ItemAlreadyExist\nfrom .routes.device_routes import device_router\nfrom .routes.statistic_routes import statistic_router\n"
  },
  {
    "path": "server/backend/app/api/exceptions.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom fastapi import HTTPException\n\n\nclass NoItemFoundException(HTTPException):\n    def __init__(self):\n        super().__init__(\n            status_code=404,\n            detail=\"No item was found for the provided ID\",\n        )\n\n\nclass ItemAlreadyExist(HTTPException):\n    def __init__(self):\n        super().__init__(\n            status_code=500,\n            detail=\"An instance with the same id already exist\",\n        )\n\n\nclass GenericException(HTTPException):\n    def __init__(self, message: str):\n        super().__init__(\n            status_code=500,\n            detail=f\"An error occurred: \\n{message}\",\n        )\n"
  },
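Since these classes subclass FastAPI's `HTTPException`, raising one inside any route handler short-circuits into a JSON error response. A small illustrative check (the demo app and route below are hypothetical, not part of the repo):

```python
from fastapi import FastAPI
from fastapi.testclient import TestClient

from app.api.exceptions import NoItemFoundException

app = FastAPI()


@app.get("/demo/{item_id}")
def demo(item_id: str):
    if item_id != "known-id":
        raise NoItemFoundException()  # becomes a 404 JSON response
    return {"id": item_id}


client = TestClient(app)
response = client.get("/demo/missing")
assert response.status_code == 404
assert response.json() == {"detail": "No item was found for the provided ID"}
```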
  {
    "path": "server/backend/app/api/routes/device_routes.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom typing import Dict, List, Optional\n\nfrom app.api import GenericException, ItemAlreadyExist, NoItemFoundException\nfrom app.db.cruds import (\n    create_device,\n    delete_device,\n    get_device,\n    get_devices,\n    update_device,\n    get_files_by_device\n)\nfrom app.db.schema import DeviceSchema, VideoFileSchema, get_db_generator\nfrom fastapi import APIRouter, Depends\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.exc import DataError, IntegrityError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import NoResultFound\n\ndevice_router = APIRouter()\n\n\n@device_router.post(\"/devices\", response_model=DeviceSchema)\ndef create_device_item(\n    device_information: DeviceSchema,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Create device.\n\n    Arguments:\n        device_information {DeviceSchema} -- New device information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[DeviceSchema, ItemAlreadyExist] -- Device instance that was added\n        to the database or an error in case the device already exists.\n    \"\"\"\n    try:\n        device_information = jsonable_encoder(device_information)\n        return create_device(\n            db_session=db, device_information=device_information\n        )\n    except IntegrityError:\n        raise ItemAlreadyExist()\n\n\n@device_router.get(\"/devices/{device_id}\", response_model=DeviceSchema)\ndef get_device_item(\n    device_id: str,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Get existing device.\n\n    Arguments:\n        device_id {str} -- Device id.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[DeviceSchema, NoItemFoundException] -- Device instance which id is device_id\n        or an exception in case there's no matching device.\n\n    \"\"\"\n    try:\n        return get_device(db_session=db, device_id=device_id)\n    except NoResultFound:\n        raise NoItemFoundException()\n\n\n@device_router.get(\n    \"/devices\",\n    response_model=List[DeviceSchema],\n    response_model_include={\"id\", \"description\", \"file_server_address\"},\n)\ndef get_devices_items(db: Session = 
Depends(get_db_generator)):\n    \"\"\"\n    Get all existing devices.\n\n    Arguments:\n        db {Session} -- Database session.\n\n    Returns:\n        List[DeviceSchema] -- All device instances present in the database.\n    \"\"\"\n    return get_devices(db_session=db)\n\n\n@device_router.put(\"/devices/{device_id}\", response_model=DeviceSchema)\ndef update_device_item(\n    device_id: str,\n    new_device_information: Dict = {},\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Modify a device.\n\n    Arguments:\n        device_id {str} -- Device id.\n        new_device_information {Dict} -- New device information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[DeviceSchema, NoItemFoundException, GenericException] -- Device instance\n        which id is device_id or an exception in case there's no matching device.\n    \"\"\"\n    try:\n        return update_device(\n            db_session=db,\n            device_id=device_id,\n            new_device_information=new_device_information,\n        )\n    except NoResultFound:\n        raise NoItemFoundException()\n    except DataError as e:\n        raise GenericException(e)\n\n\n@device_router.delete(\"/devices/{device_id}\", response_model=DeviceSchema)\ndef delete_device_item(\n    device_id: str,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Delete a device.\n\n    Arguments:\n        device_id {str} -- Device id.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[DeviceSchema, NoItemFoundException, GenericException] -- Device instance that\n        was deleted or an exception in case there's no matching device.\n    \"\"\"\n    try:\n        return delete_device(db_session=db, device_id=device_id)\n    except NoResultFound:\n        raise NoItemFoundException()\n    except DataError as e:\n        raise GenericException(e)\n\n\n@device_router.get(\"/files/{device_id}\", response_model=List[VideoFileSchema])\ndef get_device_files(\n    device_id: str,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Get existing video files in device.\n\n    Arguments:\n        device_id {str} -- Device id.\n        db {Session} -- Database session.\n\n    Returns:\n        List[VideoFileSchema] -- VideoFile instances which device_id matches\n    \"\"\"\n    return get_files_by_device(db_session=db, device_id=device_id)\n"
  },
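How this router is mounted is not shown in this section; presumably the backend's main module includes it on the application. A sketch of that wiring, with the resulting route table:

```python
from fastapi import FastAPI

from app.api import device_router  # re-exported by app/api/__init__.py

app = FastAPI()
app.include_router(device_router)

# Routes exposed by device_router:
#   POST   /devices              -> create_device_item
#   GET    /devices              -> get_devices_items
#   GET    /devices/{device_id}  -> get_device_item
#   PUT    /devices/{device_id}  -> update_device_item
#   DELETE /devices/{device_id}  -> delete_device_item
#   GET    /files/{device_id}    -> get_device_files
```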
  {
    "path": "server/backend/app/api/routes/statistic_routes.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom typing import Dict, List, Optional\n\nfrom app.api import GenericException, ItemAlreadyExist, NoItemFoundException\nfrom app.db.cruds import (\n    create_statistic,\n    delete_statistic,\n    get_statistic,\n    get_statistics,\n    get_statistics_from_to,\n    update_statistic,\n)\nfrom app.db.schema import StatisticSchema, get_db_generator\nfrom app.db.utils import convert_timestamp_to_datetime, get_enum_type\n\nfrom fastapi import APIRouter, Depends, Query\nfrom sqlalchemy.exc import DataError, IntegrityError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import NoResultFound\n\nstatistic_router = APIRouter()\n\n\n@statistic_router.post(\n    \"/devices/{device_id}/statistics\", response_model=StatisticSchema\n)\ndef create_statistic_item(\n    device_id: str,\n    statistic_information: Dict = {},\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Create new statistic entry.\n\n    Arguments:\n        device_id {str} -- Device id which sent the statistic.\n        statistic_information {Dict} -- New statistic information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[StatisticSchema, ItemAlreadyExist] -- Statistic instance that was added\n        to the database or an exception in case a statistic already exists.\n    \"\"\"\n    try:\n        # Format input data\n        statistic_information[\"device_id\"] = device_id\n        statistic_information[\"datetime\"] = convert_timestamp_to_datetime(\n            statistic_information[\"datetime\"]\n        )\n        statistic_information[\"statistic_type\"] = get_enum_type(\n            statistic_information[\"statistic_type\"]\n        )\n\n        return create_statistic(\n            db_session=db, statistic_information=statistic_information\n        )\n    except IntegrityError:\n        raise ItemAlreadyExist()\n\n\n@statistic_router.get(\n    \"/devices/{device_id}/statistics/{timestamp}\",\n    response_model=StatisticSchema,\n)\ndef get_statistic_item(\n    device_id: str,\n    timestamp: float,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Get a specific statistic.\n\n    Arguments:\n        device_id {str} -- Device id.\n     
   timestamp {float} -- Timestamp when the device registered the information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[StatisticSchema, NoItemFoundException] -- Statistic instance defined by device_id and\n        timestamp or an exception in case there's no matching statistic.\n    \"\"\"\n    try:\n        return get_statistic(\n            db_session=db,\n            device_id=device_id,\n            datetime=convert_timestamp_to_datetime(timestamp),\n        )\n    except NoResultFound:\n        raise NoItemFoundException()\n\n\n@statistic_router.get(\n    \"/devices/{device_id}/statistics\",\n    response_model=List[StatisticSchema],\n)\ndef get_all_device_statistics_items(\n    device_id: str,\n    datefrom: Optional[str] = Query(None),\n    dateto: Optional[str] = Query(None),\n    timestampfrom: Optional[float] = Query(None),\n    timestampto: Optional[float] = Query(None),\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Get all statistics of a specific device.\n\n    Arguments:\n        device_id {str} -- Device id.\n        datefrom {Optional[str]} -- Datetime to show information from.\n        dateto {Optional[str]} -- Datetime to show information to.\n        timestampfrom {Optional[float]} -- Timestamp to show information from.\n        timestampto {Optional[float]} -- Timestamp to show information from.\n        db {Session} -- Database session.\n\n    Returns:\n        List[StatisticSchema] -- Statistic instances defined by device_id and\n        datetime range.\n    \"\"\"\n    from_datetime = datefrom\n    to_datetime = dateto\n\n    if not from_datetime and timestampfrom:\n        from_datetime = convert_timestamp_to_datetime(timestampfrom)\n\n    if not to_datetime and timestampto:\n        to_datetime = convert_timestamp_to_datetime(timestampto)\n\n    return get_statistics_from_to(\n        db_session=db,\n        device_id=device_id,\n        from_date=from_datetime,\n        to_date=to_datetime,\n    )\n\n\n@statistic_router.get(\n    \"/statistics\",\n    response_model=List[StatisticSchema],\n)\ndef get_all_statistics_items(db: Session = Depends(get_db_generator)):\n    \"\"\"\n    Get all statistics from all devices.\n\n    Arguments:\n        db {Session} -- Database session.\n\n    Returns:\n        List[StatisticSchema] -- All statistic instances present in the database.\n    \"\"\"\n    return get_statistics(db_session=db)\n\n\n@statistic_router.put(\n    \"/devices/{device_id}/statistics/{timestamp}\",\n    response_model=StatisticSchema,\n)\ndef update_statistic_item(\n    device_id: str,\n    timestamp: float,\n    new_statistic_information: Dict = {},\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Modify a specific statistic.\n\n    Arguments:\n        device_id {str} -- Device id.\n        timestamp {float} -- Timestamp when the device registered the information.\n        new_statistic_information {Dict} -- New statistic information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[StatisticSchema, NoItemFoundException, GenericException] -- Updated statistic\n        instance defined by device_id and datetime or an exception in case there's no\n        matching statistic.\n    \"\"\"\n    try:\n        return update_statistic(\n            db_session=db,\n            device_id=device_id,\n            datetime=convert_timestamp_to_datetime(timestamp),\n            new_statistic_information=new_statistic_information,\n        )\n\n    except NoResultFound:\n     
   raise NoItemFoundException()\n    except DataError as e:\n        raise GenericException(e)\n\n\n@statistic_router.delete(\n    \"/devices/{device_id}/statistics/{timestamp}\",\n    response_model=StatisticSchema,\n)\ndef delete_statistic_item(\n    device_id: str,\n    timestamp: float,\n    db: Session = Depends(get_db_generator),\n):\n    \"\"\"\n    Delete a specific statistic.\n\n    Arguments:\n        device_id {str} -- Device id.\n        timestamp {float} -- Timestamp when the device registered the information.\n        db {Session} -- Database session.\n\n    Returns:\n        Union[StatisticSchema, NoItemFoundException, GenericException] -- Statistic instance\n        that was deleted or an exception in case there's no matching statistic.\n    \"\"\"\n    try:\n        return delete_statistic(\n            db_session=db,\n            device_id=device_id,\n            datetime=convert_timestamp_to_datetime(timestamp),\n        )\n\n    except NoResultFound:\n        raise NoItemFoundException()\n    except DataError as e:\n        raise GenericException(e)\n"
  },
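`convert_timestamp_to_datetime` is imported from `app.db.utils`, which is not shown in this section. Assuming the `timestamp` path parameters are Unix epoch seconds interpreted as UTC, a minimal version would look like the sketch below; the actual implementation may differ:

```python
from datetime import datetime, timezone


def convert_timestamp_to_datetime(timestamp: float) -> datetime:
    # ASSUMPTION: timestamps are Unix epoch seconds, interpreted as UTC.
    return datetime.fromtimestamp(timestamp, tz=timezone.utc)


# 1609459200.0 is 2021-01-01T00:00:00Z
assert str(convert_timestamp_to_datetime(1609459200.0)) == "2021-01-01 00:00:00+00:00"
```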
  {
    "path": "server/backend/app/core/config.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\n\n# Database configuration\nDB_USER = os.environ[\"POSTGRES_USER\"]\nDB_PASSWORD = os.environ[\"POSTGRES_PASSWORD\"]\nDB_NAME = os.environ[\"POSTGRES_DB\"]\nDB_PORT = os.environ[\"POSTGRES_PORT\"]\nDB_URI = f\"postgresql://{DB_USER}:{DB_PASSWORD}@db:{DB_PORT}/{DB_NAME}\"\n\n# MQTT broker configuration\nMQTT_BROKER = os.environ[\"MQTT_BROKER\"]\nMQTT_BROKER_PORT = int(os.environ[\"MQTT_BROKER_PORT\"])\n\n# MQTT subscriber configuration\nSUBSCRIBER_CLIENT_ID = os.environ[\"SUBSCRIBER_CLIENT_ID\"]\n\n# Topic configuration\nMQTT_HELLO_TOPIC = \"hello\"\nMQTT_ALERT_TOPIC = \"alerts\"\nMQTT_REPORT_TOPIC = \"receive-from-jetson\"\nMQTT_SEND_TOPIC = \"send-to-jetson\"\nMQTT_FILES_TOPIC = \"video-files\"\n"
  },
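This module reads its configuration eagerly with `os.environ[...]`, so the backend fails at import time (with a KeyError) if any variable is missing. A hedged sketch of the environment it expects; every value below is a placeholder:

```python
import os

os.environ.update(
    {
        "POSTGRES_USER": "maskcam",      # all values here are placeholders
        "POSTGRES_PASSWORD": "secret",
        "POSTGRES_DB": "maskcam",
        "POSTGRES_PORT": "5432",
        "MQTT_BROKER": "mqtt-broker",
        "MQTT_BROKER_PORT": "1883",
        "SUBSCRIBER_CLIENT_ID": "server-subscriber",
    }
)

from app.core import config  # import only after the env vars are set

print(config.DB_URI)  # postgresql://maskcam:secret@db:5432/maskcam
```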
  {
    "path": "server/backend/app/db/cruds/__init__.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom .crud_device import (\n    create_device,\n    delete_device,\n    get_device,\n    get_devices,\n    update_device,\n)\nfrom .crud_statistic import (\n    create_statistic,\n    delete_statistic,\n    get_statistic,\n    get_statistics,\n    get_statistics_from_to,\n    update_statistic,\n)\nfrom .crud_video_file import (\n    update_files,\n    get_files_by_device,\n)\n"
  },
  {
    "path": "server/backend/app/db/cruds/crud_device.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom typing import List, Union, Dict\n\nfrom app.db.schema import DeviceModel\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\ndef create_device(\n    db_session: Session, device_information: Dict = {}\n) -> Union[DeviceModel, IntegrityError]:\n    \"\"\"\n    Register new Jetson device.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_information {Dict} -- New device information.\n\n    Returns:\n        Union[DeviceModel, IntegrityError] -- Device instance that was added\n        to the database or an exception in case the device already exists.\n    \"\"\"\n    try:\n        # Replace empty spaces in device id\n        device_information[\"id\"] = device_information[\"id\"].replace(\" \", \"_\")\n\n        # Create device\n        device = DeviceModel(**device_information)\n        db_session.add(device)\n        db_session.commit()\n        db_session.refresh(device)\n        return device\n\n    except IntegrityError:\n        db_session.rollback()\n        raise\n\n\ndef get_device(\n    db_session: Session, device_id: str\n) -> Union[DeviceModel, NoResultFound]:\n    \"\"\"\n    Get a specific device.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id.\n\n    Returns:\n        Union[DeviceModel, NoResultFound] -- Device instance which id is device_id\n        or an exception in case there's no matching device.\n    \"\"\"\n    try:\n        return get_device_by_id(db_session, device_id)\n\n    except NoResultFound:\n        raise\n\n\ndef get_devices(db_session: Session) -> List[DeviceModel]:\n    \"\"\"\n    Get all devices.\n\n    Arguments:\n        db_session {Session} -- Database session.\n\n    Returns:\n        List[DeviceModel] -- All device instances present in the database.\n    \"\"\"\n    return db_session.query(DeviceModel).all()\n\n\ndef update_device(\n    db_session: Session, device_id: str, new_device_information: Dict = {}\n) -> Union[DeviceModel, NoResultFound]:\n    \"\"\"\n    Modify a specific Jetson device.\n\n    Arguments:\n        db_session {Session} -- Database 
session.\n        device_id {str} -- Jetson id.\n        new_device_information {Dict} -- New device information.\n\n    Returns:\n        Union[DeviceModel, NoResultFound] -- Device instance which id is device_id\n        or an exception in case there's no matching device.\n    \"\"\"\n    try:\n        try:\n            # Remove device id as it can't be modified\n            del new_device_information[\"id\"]\n        except KeyError:\n            pass\n\n        device = get_device_by_id(db_session, device_id)\n\n        for key, value in new_device_information.items():\n            if hasattr(device, key):\n                setattr(device, key, value)\n\n        db_session.commit()\n        return device\n\n    except NoResultFound:\n        raise\n\n\ndef delete_device(\n    db_session: Session, device_id: str\n) -> Union[DeviceModel, NoResultFound]:\n    \"\"\"\n    Delete a device.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id.\n\n    Returns:\n        Union[DeviceModel, NoResultFound] -- Device instance that was deleted\n        or an exception in case there's no matching device.\n\n    \"\"\"\n    try:\n        device = get_device_by_id(db_session, device_id)\n        db_session.delete(device)\n        db_session.commit()\n        return device\n\n    except NoResultFound:\n        raise\n\n\ndef get_device_by_id(\n    db_session: Session, device_id: str\n) -> Union[DeviceModel, NoResultFound]:\n    \"\"\"\n    Get a device using the table's primary key.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id.\n\n    Returns:\n        Union[DeviceModel, NoResultFound] -- Device instance which id is device_id\n        or an exception in case there's no matching device.\n\n    \"\"\"\n    device = db_session.query(DeviceModel).get(device_id)\n\n    if not device:\n        raise NoResultFound()\n\n    return device\n"
  },
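A hedged usage sketch for these CRUD helpers. In the real app, sessions come from `get_db_generator` via FastAPI's dependency injection; the engine URL below is a placeholder:

```python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from app.db.cruds import create_device, get_devices

engine = create_engine("postgresql://maskcam:secret@db:5432/maskcam")  # placeholder URL
SessionLocal = sessionmaker(bind=engine)

db = SessionLocal()
try:
    # Note the id normalization in create_device():
    # "my jetson" is stored as "my_jetson".
    create_device(
        db_session=db,
        device_information={"id": "my jetson", "description": "entrance camera"},
    )
    print([device.id for device in get_devices(db_session=db)])  # ['my_jetson', ...]
finally:
    db.close()
```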
  {
    "path": "server/backend/app/db/cruds/crud_statistic.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom datetime import datetime, timezone\nfrom typing import Dict, List, Optional, Union\n\nfrom app.db.schema import StatisticsModel\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\ndef create_statistic(\n    db_session: Session, statistic_information: Dict = {}\n) -> Union[StatisticsModel, IntegrityError]:\n    \"\"\"\n    Register new statistic entry.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        statistic_information {Dict} -- New statistic information.\n\n    Returns:\n        Union[StatisticsModel, IntegrityError] -- Statistic instance that was added\n        to the database or an exception in case a statistic already exists.\n    \"\"\"\n    try:\n        statistic = StatisticsModel(**statistic_information)\n        db_session.add(statistic)\n        db_session.commit()\n        db_session.refresh(statistic)\n        return statistic\n\n    except IntegrityError:\n        db_session.rollback()\n        raise\n\n\ndef get_statistic(\n    db_session: Session, device_id: str, datetime: datetime\n) -> Union[StatisticsModel, NoResultFound]:\n    \"\"\"\n    Get a specific statistic.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id which sent the information.\n        datetime {datetime} -- Datetime when the device registered the information.\n\n    Returns:\n        Union[StatisticsModel, NoResultFound] -- Statistic instance defined by device_id and datetime\n        or an exception in case there's no matching statistic.\n    \"\"\"\n    try:\n        return get_statistic_by_id_and_datetime(db_session, device_id, datetime)\n\n    except NoResultFound:\n        raise\n\n\ndef get_statistics(\n    db_session: Session, device_id: Optional[str] = None\n) -> List[StatisticsModel]:\n    \"\"\"\n    Get all statistics.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {Optional[str]} -- Device id.\n\n    Returns:\n        List[StatisticsModel] -- All statistic instances present in the database or\n        all statistics from a specific device.\n    \"\"\"\n    if 
device_id:\n        # Get all statistics from a specific device\n        query = db_session.query(StatisticsModel)\n        return query.filter(StatisticsModel.device_id == device_id).all()\n\n    else:\n        # Get all statistics from the database\n        return db_session.query(StatisticsModel).all()\n\n\ndef get_statistics_from_to(\n    db_session: Session,\n    device_id: str,\n    from_date: Optional[str] = None,\n    to_date: Optional[str] = None,\n) -> List[StatisticsModel]:\n    \"\"\"\n    Get all statistics within a datetime range.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Device id.\n        from_date {Optional[str]} -- Beginning of datetime range.\n        to_date {Optional[str]} -- End of datetime range.\n\n    Returns:\n        List[StatisticsModel] -- All statistic instances present in the database\n        within a given datetime range.\n    \"\"\"\n    query = db_session.query(StatisticsModel)\n    query = query.filter(StatisticsModel.device_id == device_id)\n\n    if to_date is None:\n        # By default, show information until the current day\n        to_date = datetime.now(timezone.utc).strftime(\"%Y-%m-%dT%H:%M:%S\")\n\n    if from_date:\n        return query.filter(\n            StatisticsModel.datetime.between(from_date, to_date)\n        ).all()\n\n    return query.filter(StatisticsModel.datetime <= to_date).all()\n\n\ndef update_statistic(\n    db_session: Session,\n    device_id: str,\n    datetime: datetime,\n    new_statistic_information: Dict = {},\n) -> Union[StatisticsModel, NoResultFound]:\n    \"\"\"\n    Modify a specific statistic.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id which sent the information.\n        datetime {datetime} -- Datetime when the device registered the information.\n        new_statistic_information {Dict} -- New statistic information.\n\n    Returns:\n        Union[StatisticsModel, NoResultFound] -- Updated statistic instance defined by device_id\n        and datetime or an exception in case there's no matching statistic.\n    \"\"\"\n    try:\n        try:\n            # Remove device id as it can't be modified\n            del new_statistic_information[\"device_id\"]\n        except KeyError:\n            pass\n\n        try:\n            # Remove datetime as it can't be modified\n            del new_statistic_information[\"datetime\"]\n        except KeyError:\n            pass\n\n        statistic = get_statistic_by_id_and_datetime(\n            db_session, device_id, datetime\n        )\n\n        for key, value in new_statistic_information.items():\n            if hasattr(statistic, key):\n                setattr(statistic, key, value)\n\n        db_session.commit()\n        return statistic\n\n    except NoResultFound:\n        raise\n\n\ndef delete_statistic(\n    db_session: Session, device_id: str, datetime: datetime\n) -> Union[StatisticsModel, NoResultFound]:\n    \"\"\"\n    Delete a specific statistic.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id which sent the information.\n        datetime {datetime} -- Datetime when the device registered the information.\n\n    Returns:\n        Union[StatisticsModel, NoResultFound] -- Statistic instance that was deleted\n        or an exception in case there's no matching statistic.\n    \"\"\"\n    try:\n        statistic = get_statistic_by_id_and_datetime(\n            db_session, device_id, 
datetime\n        )\n        db_session.delete(statistic)\n        db_session.commit()\n        return statistic\n\n    except NoResultFound:\n        raise\n\n\ndef get_statistic_by_id_and_datetime(\n    db_session: Session, device_id: str, datetime: datetime\n) -> Union[StatisticsModel, NoResultFound]:\n    \"\"\"\n    Get a statistic using the table's primary keys.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id which sent the information.\n        datetime {datetime} -- Datetime when the device registered the information.\n\n    Returns:\n        Union[StatisticsModel, NoResultFound] -- Statistic instance defined by device_id and\n        datetime or an exception in case there's no matching statistic.\n    \"\"\"\n    statistic = db_session.query(StatisticsModel).get((device_id, datetime))\n\n    if not statistic:\n        raise NoResultFound()\n\n    return statistic\n"
  },
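The range query in `get_statistics_from_to` has three behaviors worth spelling out. Reusing the illustrative `db` session from the CRUD sketch above:

```python
from app.db.cruds import get_statistics_from_to

# from_date and to_date given -> datetime BETWEEN from_date AND to_date
stats = get_statistics_from_to(
    db_session=db,
    device_id="my_jetson",
    from_date="2021-01-01T00:00:00",
    to_date="2021-01-31T23:59:59",
)

# only to_date given -> datetime <= to_date
# neither given      -> to_date defaults to now (UTC), i.e. datetime <= now
```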
  {
    "path": "server/backend/app/db/cruds/crud_video_file.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom datetime import datetime, timezone\nfrom typing import Dict, List, Optional, Union\n\nfrom app.db.schema import VideoFilesModel\n\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy.orm.exc import NoResultFound\n\n\ndef update_files(\n    db_session: Session,\n    device_id: str,\n    file_list: List,\n) -> List[VideoFilesModel]:\n    \"\"\"\n    Update the whole list of available files for this device\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id which sent the information.\n        file_list {List} -- List of all available files in the device\n\n    Returns:\n        VideoFilesModel -- Updated video_files instance defined by device_id\n    \"\"\"\n    # Remove all previous files for device_id\n    query = db_session.query(VideoFilesModel)\n    query = query.filter(VideoFilesModel.device_id == device_id)\n    query.delete(synchronize_session=False)\n    db_session.commit()\n\n    # Add new list\n    result = []\n    for new_file in file_list:\n        file_add = VideoFilesModel(device_id=device_id, video_name=new_file)\n        db_session.add(file_add)\n        result.append(file_add)\n\n    db_session.commit()\n    return result\n\n\ndef get_files_by_device(\n    db_session: Session, device_id: str\n) -> List[VideoFilesModel]:\n    \"\"\"\n    Get a file using the table's primary keys.\n\n    Arguments:\n        db_session {Session} -- Database session.\n        device_id {str} -- Jetson id to query files\n\n    Returns:\n        List[VideoFilesModel] -- All video files for the device\n    \"\"\"\n    query = db_session.query(VideoFilesModel)\n    return query.filter(VideoFilesModel.device_id == device_id).all()\n"
  },
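`update_files` synchronizes by full replacement rather than diffing: it deletes every row for the device, then re-inserts the list it was given, so the table always mirrors the device's latest file report. Continuing the illustrative session from the CRUD sketches above:

```python
from app.db.cruds import get_files_by_device, update_files

update_files(db_session=db, device_id="my_jetson", file_list=["20210126_145511_1.mp4"])
assert [
    f.video_name for f in get_files_by_device(db_session=db, device_id="my_jetson")
] == ["20210126_145511_1.mp4"]
```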
  {
    "path": "server/backend/app/db/migrations/env.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom logging.config import fileConfig\n\nfrom alembic import context\nfrom app.core import config as app_config\nfrom app.db.schema import Base\nfrom sqlalchemy import engine_from_config, pool\n\n# this is the Alembic Config object, which provides\n# access to the values within the .ini file in use.\nconfig = context.config\n\nconfig.set_main_option(\"sqlalchemy.url\", app_config.DB_URI)\n\n# Interpret the config file for Python logging.\n# This line sets up loggers basically.\nfileConfig(config.config_file_name)\n\ntarget_metadata = Base.metadata\n\n\ndef run_migrations_offline():\n    \"\"\"\n    Run migrations in 'offline' mode.\n\n    This configures the context with just a URL\n    and not an Engine, though an Engine is acceptable\n    here as well.  By skipping the Engine creation\n    we don't even need a DBAPI to be available.\n\n    Calls to context.execute() here emit the given string to the\n    script output.\n    \"\"\"\n    url = config.get_main_option(\"sqlalchemy.url\")\n    context.configure(\n        url=url,\n        target_metadata=target_metadata,\n        literal_binds=True,\n        dialect_opts={\"paramstyle\": \"named\"},\n    )\n\n    with context.begin_transaction():\n        context.run_migrations()\n\n\ndef run_migrations_online():\n    \"\"\"\n    Run migrations in 'online' mode.\n\n    In this scenario we need to create an Engine\n    and associate a connection with the context.\n    \"\"\"\n    connectable = engine_from_config(\n        config.get_section(config.config_ini_section),\n        prefix=\"sqlalchemy.\",\n        poolclass=pool.NullPool,\n    )\n\n    with connectable.connect() as connection:\n        context.configure(\n            connection=connection, target_metadata=target_metadata\n        )\n\n        with context.begin_transaction():\n            context.run_migrations()\n\n\nif context.is_offline_mode():\n    run_migrations_offline()\nelse:\n    run_migrations_online()\n"
  },
  {
    "path": "server/backend/app/db/migrations/script.py.mako",
    "content": "\"\"\"${message}\n\nRevision ID: ${up_revision}\nRevises: ${down_revision | comma,n}\nCreate Date: ${create_date}\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n${imports if imports else \"\"}\n\n# revision identifiers, used by Alembic.\nrevision = ${repr(up_revision)}\ndown_revision = ${repr(down_revision)}\nbranch_labels = ${repr(branch_labels)}\ndepends_on = ${repr(depends_on)}\n\n\ndef upgrade():\n    ${upgrades if upgrades else \"pass\"}\n\n\ndef downgrade():\n    ${downgrades if downgrades else \"pass\"}\n"
  },
  {
    "path": "server/backend/app/db/migrations/versions/6a4d853aabce_added_database.py",
    "content": "\"\"\"Added database\n\nRevision ID: 6a4d853aabce\nRevises: 8f58cd776eda\nCreate Date: 2020-12-23 13:49:00.640607\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6a4d853aabce'\ndown_revision = '8f58cd776eda'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    pass\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    pass\n    # ### end Alembic commands ###\n"
  },
  {
    "path": "server/backend/app/db/migrations/versions/6d5c250f098c_added_device_file_server_address.py",
    "content": "\"\"\"Added Device.file_server_address\n\nRevision ID: 6d5c250f098c\nRevises: fb245977373f\nCreate Date: 2021-01-26 14:55:11.504148\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '6d5c250f098c'\ndown_revision = 'fb245977373f'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('device', sa.Column('file_server_address', sa.String(), nullable=True))\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_column('device', 'file_server_address')\n    # ### end Alembic commands ###\n"
  },
  {
    "path": "server/backend/app/db/migrations/versions/8f58cd776eda_added_database.py",
    "content": "\"\"\"Added database\n\nRevision ID: 8f58cd776eda\nRevises: \nCreate Date: 2020-12-23 13:38:47.314749\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '8f58cd776eda'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('device',\n    sa.Column('id', sa.String(), nullable=False),\n    sa.Column('description', sa.String(), nullable=True),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('statistic',\n    sa.Column('device_id', sa.String(), nullable=False),\n    sa.Column('datetime', sa.DateTime(), nullable=False),\n    sa.Column('statistic_type', sa.Enum('REPORT', 'ALERT', name='statistictypeenum'), nullable=True),\n    sa.Column('people_with_mask', sa.Integer(), nullable=True),\n    sa.Column('people_without_mask', sa.Integer(), nullable=True),\n    sa.Column('people_total', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['device_id'], ['device.id'], ),\n    sa.PrimaryKeyConstraint('device_id', 'datetime')\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.drop_table('statistic')\n    op.drop_table('device')\n    # ### end Alembic commands ###\n"
  },
  {
    "path": "server/backend/app/db/migrations/versions/fb245977373f_added_video_file_table.py",
    "content": "\"\"\"Added video_file table\n\nRevision ID: fb245977373f\nRevises: 6a4d853aabce\nCreate Date: 2021-01-21 22:47:20.335928\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'fb245977373f'\ndown_revision = '6a4d853aabce'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('video_file',\n    sa.Column('device_id', sa.String(), nullable=False),\n    sa.Column('video_name', sa.String(), nullable=False),\n    sa.ForeignKeyConstraint(['device_id'], ['device.id'], ),\n    sa.PrimaryKeyConstraint('device_id', 'video_name')\n    )\n    op.alter_column('statistic', 'people_total',\n               existing_type=sa.INTEGER(),\n               nullable=False)\n    op.alter_column('statistic', 'people_with_mask',\n               existing_type=sa.INTEGER(),\n               nullable=False)\n    op.alter_column('statistic', 'people_without_mask',\n               existing_type=sa.INTEGER(),\n               nullable=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.alter_column('statistic', 'people_without_mask',\n               existing_type=sa.INTEGER(),\n               nullable=True)\n    op.alter_column('statistic', 'people_with_mask',\n               existing_type=sa.INTEGER(),\n               nullable=True)\n    op.alter_column('statistic', 'people_total',\n               existing_type=sa.INTEGER(),\n               nullable=True)\n    op.drop_table('video_file')\n    # ### end Alembic commands ###\n"
  },
  {
    "path": "server/backend/app/db/schema/__init__.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom .base import Base, engine, get_db_session, get_db_generator\nfrom .models import DeviceModel, StatisticsModel, VideoFilesModel\nfrom .schemas import DeviceSchema, StatisticSchema, VideoFileSchema\n"
  },
  {
    "path": "server/backend/app/db/schema/base.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom typing import Generator, Union\n\nfrom app.core.config import DB_URI\n\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import Session, sessionmaker\n\n\ndef get_db_session() -> Session:\n    \"\"\"\n    Get a new create a new database session.\n\n    Returns:\n        Session -- New database session.\n    \"\"\"\n    db = SessionLocal()\n    try:\n        return db\n    finally:\n        db.close()\n\n\ndef get_db_generator() -> Generator:\n    \"\"\"\n    Get a new create a new database generator.\n\n    Returns:\n        Generator -- New database generator.\n    \"\"\"\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\n# Create ORM engine and session\nengine = create_engine(DB_URI)\nSessionLocal = sessionmaker(bind=engine)\n\n# Construct a base class for declarative class definitions\nBase = declarative_base()\n"
  },
  {
    "path": "server/backend/app/db/schema/models.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom app.db.schema import Base\nfrom app.db.utils import StatisticTypeEnum\n\nfrom sqlalchemy import Column, DateTime, Enum, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\n\nclass StatisticsModel(Base):\n    __tablename__ = \"statistic\"\n\n    device_id = Column(\n        String,\n        ForeignKey(\"device.id\"),\n        primary_key=True,\n    )\n    datetime = Column(DateTime(timezone=True), primary_key=True)\n    statistic_type = Column(Enum(StatisticTypeEnum, nullable=False))\n    people_with_mask = Column(Integer, nullable=False)\n    people_without_mask = Column(Integer, nullable=False)\n    people_total = Column(Integer, nullable=False)\n\n\nclass VideoFilesModel(Base):\n    __tablename__ = \"video_file\"\n    device_id = Column(\n        String,\n        ForeignKey(\"device.id\"),\n        primary_key=True,\n    )\n    video_name = Column(String, primary_key=True)\n \n\nclass DeviceModel(Base):\n    __tablename__ = \"device\"\n\n    id = Column(String, primary_key=True)\n    description = Column(String)\n    file_server_address = Column(String)\n    statistics = relationship(\"StatisticsModel\", cascade=\"all, delete\")\n    video_files = relationship(\"VideoFilesModel\", cascade=\"all, delete\")\n"
  },
  {
    "path": "server/backend/app/db/schema/schemas.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom datetime import datetime\nfrom typing import Optional, List\n\nfrom app.db.utils import StatisticTypeEnum\nfrom pydantic import BaseModel\n\n\nclass StatisticSchema(BaseModel):\n    device_id: str\n    datetime: datetime\n    statistic_type: StatisticTypeEnum\n    people_with_mask: int\n    people_without_mask: int\n    people_total: int\n\n    class Config:\n        orm_mode = True\n\n\nclass DeviceSchema(BaseModel):\n    id: str\n    description: Optional[str] = None\n    file_server_address: Optional[str] = None\n    statistics: List[StatisticSchema] = []\n\n    class Config:\n        orm_mode = True\n\n\nclass VideoFileSchema(BaseModel):\n    device_id: str\n    video_name: str\n\n    class Config:\n        orm_mode = True\n"
  },
  {
    "path": "server/backend/app/db/utils/__init__.py",
    "content": "from .enums import StatisticTypeEnum\nfrom .utils import convert_timestamp_to_datetime, get_enum_type\n"
  },
  {
    "path": "server/backend/app/db/utils/enums.py",
    "content": "from enum import Enum\n\n\nclass StatisticTypeEnum(str, Enum):\n    REPORT = \"REPORT\"\n    ALERT = \"ALERT\"\n"
  },
  {
    "path": "server/backend/app/db/utils/utils.py",
    "content": "from datetime import datetime, timezone\n\nfrom .enums import StatisticTypeEnum\n\n\ndef convert_timestamp_to_datetime(timestamp: float) -> datetime:\n    \"\"\"\n    Convert timestamp date format to datetime.\n\n    Arguments:\n        timestamp {float} -- Input timestamp.\n\n    Returns:\n        datetime -- Datetime formatted object which represents the\n        same information as timestamp.\n    \"\"\"\n    return datetime.fromtimestamp(timestamp, timezone.utc)\n\n\ndef get_enum_type(statistic_type: str) -> StatisticTypeEnum:\n    \"\"\"\n    Convert string object to enum.\n\n    Arguments:\n        statistic_type {str} -- Input string.\n\n    Returns:\n        StatisticTypeEnum -- Enum corresponding to statistic_type.\n    \"\"\"\n    return (\n        StatisticTypeEnum.ALERT\n        if statistic_type.lower() == \"alerts\"\n        else StatisticTypeEnum.REPORT\n    )\n"
  },
  {
    "path": "server/backend/app/main.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom fastapi import FastAPI\n\nfrom app.api import device_router, statistic_router\n\napp = FastAPI()\n\napp.include_router(device_router)\napp.include_router(statistic_router)\n\n\n@app.get(\"/\")\ndef health_check():\n    \"\"\"\n    API health check used by the load balancer.\n    \"\"\"\n    return {\"statusCode\": 200}\n"
  },
  {
    "path": "server/backend/app/mqtt/broker.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom app.core.config import MQTT_BROKER, MQTT_BROKER_PORT\nfrom paho.mqtt import client as mqtt_client\nfrom typing import Callable\n\n\ndef connect_mqtt_broker(client_id: str, cb_connect: Callable=None) -> mqtt_client:\n    \"\"\"\n    Connect to MQTT broker.\n\n    Arguments:\n        client_id {str} -- Client process id.\n        cb_connect {Callable} -- Callback for on_connect\n\n    Returns:\n        mqtt_client -- MQTT client.\n    \"\"\"\n\n    def on_connect(client, userdata, flags, code):\n        if code == 0:\n            print(\"Connected to MQTT Broker\")\n            if cb_connect is not None:\n                cb_connect(client)\n        else:\n            print(f\"Failed to connect, return code {code}\\n\")\n\n    def on_disconnect(client, userdata, code):\n        print(\"MQTT Broker disconnected\")\n\n    client = mqtt_client.Client(client_id)\n    client.on_connect = on_connect\n    client.on_disconnect = on_disconnect\n    client.connect(MQTT_BROKER, MQTT_BROKER_PORT)\n    return client\n"
  },
  {
    "path": "server/backend/app/mqtt/publisher.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport json\nimport random\nimport time\nfrom datetime import datetime, timezone, timedelta\nfrom paho.mqtt import client as mqtt_client\n\n\ndef publish(client):\n    for msg_count in range(0, 6):\n        time.sleep(1)\n\n        device_id = f\"Device_{msg_count}\"\n\n        # Test hello\n        topic = \"hello\"\n        hello_msg = {\n            \"id\": device_id,\n            \"description\": f\"Description {msg_count}\",\n        }\n        hello_result = client.publish(topic, json.dumps(hello_msg))\n\n        if hello_result[0] == 0:\n            print(f\"Send `{hello_msg}` to topic `{topic}`\")\n        else:\n            print(f\"Failed to send message to topic {topic}\")\n\n        # Test alert\n        time.sleep(1)\n        topics = [\"alerts\", \"receive-from-jetson\"]\n        now = datetime.now(timezone.utc)\n        for _ in range(0, 600):\n            topic = random.choice(topics)\n            if topic == \"alerts\":\n                people_with_mask = random.randint(2000, 3000)\n                people_without_mask = random.randint(0, 1000)\n            else:\n                people_with_mask = random.randint(0, 1000)\n                people_without_mask = random.randint(500, 1000)\n\n            alert_msg = {\n                \"device_id\": device_id,\n                \"timestamp\": datetime.timestamp(now),\n                \"people_with_mask\": people_with_mask,\n                \"people_without_mask\": people_without_mask,\n                \"people_total\": people_with_mask + people_without_mask,\n            }\n\n            result = client.publish(topic, json.dumps(alert_msg))\n\n            if result[0] == 0:\n                print(f\"Send `{alert_msg}` to topic `{topic}`\")\n            else:\n                print(f\"Failed to send message to topic {topic}\")\n\n            now += timedelta(minutes=1)\n\n\ndef connect_mqtt_broker(client_id: str):\n    def on_connect(client, userdata, flags, code):\n        if code == 0:\n            print(\"Connected to MQTT Broker\")\n        else:\n            print(f\"Failed to connect, return code {code}\\n\")\n\n    client = mqtt_client.Client(client_id)\n    client.on_connect = on_connect\n    # 
client.connect(\"3.17.17.197\", 1883)\n    client.connect(\"0.0.0.0\", 1883)\n    return client\n\n\ndef run():\n    client = connect_mqtt_broker(client_id=f\"publisher-{random.randint(0, 10)}\")\n    client.loop_start()\n    publish(client)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "server/backend/app/mqtt/subscriber.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport json\n\nfrom app.core.config import SUBSCRIBER_CLIENT_ID, MQTT_HELLO_TOPIC,\\\n                            MQTT_ALERT_TOPIC, MQTT_SEND_TOPIC,\\\n                            MQTT_REPORT_TOPIC, MQTT_FILES_TOPIC\nfrom app.db.cruds import create_device, create_statistic, update_files, update_device\nfrom app.db.schema import get_db_session\nfrom app.db.utils import convert_timestamp_to_datetime, get_enum_type\nfrom broker import connect_mqtt_broker\n\nfrom paho.mqtt import client as mqtt_client\nfrom sqlalchemy.exc import IntegrityError\n\nMQTT_CLIENT_TOPICS = [  # topic, QoS\n    (MQTT_HELLO_TOPIC, 2),\n    (MQTT_FILES_TOPIC, 2),\n    (MQTT_ALERT_TOPIC, 2),\n    (MQTT_REPORT_TOPIC, 2),\n    (MQTT_SEND_TOPIC, 2),\n]\n\ndef subscribe(client: mqtt_client):\n    \"\"\"\n    Subscribe client to topic.\n\n    Arguments:\n        client {mqtt_client} -- Client process id.\n    \"\"\"\n\n    def on_message(client, userdata, msg):\n        database_session = get_db_session()\n        try:\n            process_message(database_session, msg)\n        finally:\n            database_session.close()\n\n    client.subscribe(MQTT_CLIENT_TOPICS)\n    client.on_message = on_message\n\n\ndef process_message(database_session, msg):\n    \"\"\"\n    Process message sent to topic.\n\n    Arguments:\n        database_session {Session} -- Database session.\n        msg {str} -- Received message.\n    \"\"\"\n    message = json.loads(msg.payload.decode())\n\n    topic = msg.topic\n    if topic == \"hello\":\n        # Register new Jetson device\n        device_id = message[\"device_id\"]\n\n        try:\n            device_information = {\n                \"id\": device_id,\n                \"description\": message[\"description\"],\n            }\n            device = create_device(\n                db_session=database_session,\n                device_information=device_information,\n            )\n            print(\"Added device\")\n        except IntegrityError:\n            print(f\"A device with id={device_id} already exists\")\n\n    elif topic in [\"alerts\", \"receive-from-jetson\"]:\n        try:\n            # Receive alert or report and save it to the database\n            
statistic_information = {\n                \"device_id\": message[\"device_id\"],\n                \"datetime\": convert_timestamp_to_datetime(message[\"timestamp\"]),\n                \"statistic_type\": get_enum_type(topic),\n                \"people_with_mask\": message[\"people_with_mask\"],\n                \"people_without_mask\": message[\"people_without_mask\"],\n                \"people_total\": message[\"people_total\"],\n            }\n\n            statistic = create_statistic(\n                db_session=database_session,\n                statistic_information=statistic_information,\n            )\n\n            print(f\"Added statistic\")\n        except IntegrityError:\n            print(f\"Error, the statistic already exist\")\n    \n    elif topic == \"video-files\":\n        try:\n            print(f\"Adding files for device_id: {message['device_id']}\")\n            new_information = {\"file_server_address\": message[\"file_server\"]}\n            update_device(db_session=database_session, device_id=message[\"device_id\"], new_device_information=new_information)\n            update_files(db_session=database_session, device_id=message[\"device_id\"], file_list=message[\"file_list\"])\n        except Exception as e:\n            print(f\"Exception trying to update files: {e}\")\n\n    elif topic == \"send-to-jetson\":\n        # Just monitoring this channel, useful for debugging\n        print(f\"Detected info sent to device_id: {message['device_id']}\")\n\n\ndef main():\n    client = connect_mqtt_broker(client_id=SUBSCRIBER_CLIENT_ID, cb_connect=subscribe)\n    client.loop_forever()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "server/backend/prestart.sh",
    "content": "#!/bin/bash\n\n# Wait database initialization and apply migrations if needed\nsleep 3\nalembic upgrade head\n\n# Init subscriber process\npython app/mqtt/subscriber.py &\n"
  },
  {
    "path": "server/backend/requirements.txt",
    "content": "paho-mqtt==1.5.1\nSQLAlchemy==1.3.21\npython-dotenv==0.15.0\npsycopg2-binary==2.8.6\nalembic==1.4.3\npytest==6.2.1\npydantic==1.7.3\n"
  },
  {
    "path": "server/backend/test_crud.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport random\nfrom datetime import datetime, timezone\n\nimport pytest\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.db.cruds import (\n    create_device,\n    create_statistic,\n    delete_device,\n    delete_statistic,\n    get_device,\n    get_devices,\n    get_statistic,\n    get_statistics,\n    update_device,\n    update_statistic,\n)\nfrom app.db.schema import get_db_session\nfrom app.db.utils import StatisticTypeEnum, convert_timestamp_to_datetime\n\nDEVICE_ID = \"test\"\ndatabase_session = get_db_session()\n\n# Device\ndef test_create_device():\n    info = {\n        \"id\": DEVICE_ID,\n        \"description\": \"test description\",\n    }\n    device = create_device(db_session=database_session, device_information=info)\n\n    assert device.id == DEVICE_ID\n    assert device.description == \"test description\"\n\n\ndef test_create_device_more_fields():\n    with pytest.raises(TypeError):\n        info = {\n            \"id\": DEVICE_ID,\n            \"description\": \"test description\",\n            \"test_field_1\": 1,\n            \"test_field_2\": 2,\n        }\n        create_device(db_session=database_session, device_information=info)\n\n\ndef test_create_same_device():\n    with pytest.raises(IntegrityError):\n        info = {\n            \"id\": DEVICE_ID,\n            \"description\": \"test description\",\n        }\n        device = create_device(\n            db_session=database_session, device_information=info\n        )\n\n\ndef test_get_device():\n    device = get_device(db_session=database_session, device_id=DEVICE_ID)\n\n    assert device.id == DEVICE_ID\n    assert device.description == \"test description\"\n\n\ndef test_update_device():\n    device = update_device(\n        db_session=database_session,\n        device_id=DEVICE_ID,\n        new_device_information={\"description\": \"new description\"},\n    )\n\n    assert device.id == DEVICE_ID\n    assert device.description == \"new description\"\n\n\n# Statistics\ndef test_create_statistic():\n    people_with_mask = 4\n    people_without_mask = 7\n    now = convert_timestamp_to_datetime(1609780971.514455)\n    # now = datetime(2021, 1, 
4, 17, 22, 51, 514455, tzinfo=timezone.utc)\n\n    stat_info = {\n        \"device_id\": DEVICE_ID,\n        \"datetime\": now,\n        \"statistic_type\": StatisticTypeEnum.ALERT,\n        \"people_with_mask\": people_with_mask,\n        \"people_without_mask\": people_without_mask,\n        \"people_total\": people_with_mask + people_without_mask,\n    }\n\n    statistic = create_statistic(\n        db_session=database_session, statistic_information=stat_info\n    )\n\n    assert statistic.device_id == DEVICE_ID\n    assert statistic.datetime == now.replace(tzinfo=None)\n    assert statistic.statistic_type == StatisticTypeEnum.ALERT\n    assert statistic.people_with_mask == people_with_mask\n    assert statistic.people_without_mask == people_without_mask\n    assert statistic.people_total == people_with_mask + people_without_mask\n\n\ndef test_create_same_statistic():\n    people_with_mask = 4\n    people_without_mask = 7\n    # now = datetime(2021, 1, 4, 17, 22, 51, 514455, tzinfo=timezone.utc)\n    now = convert_timestamp_to_datetime(1609780971.514455)\n\n    with pytest.raises(IntegrityError):\n        stat_info = {\n            \"device_id\": DEVICE_ID,\n            \"datetime\": now,\n            \"statistic_type\": StatisticTypeEnum.ALERT,\n            \"people_with_mask\": people_with_mask,\n            \"people_without_mask\": people_without_mask,\n            \"people_total\": people_with_mask + people_without_mask,\n        }\n\n        create_statistic(\n            db_session=database_session, statistic_information=stat_info\n        )\n\n\ndef test_create_another_statistic():\n    people_with_mask = 5\n    people_without_mask = 8\n    # now = datetime(2021, 1, 5, 17, 22, 51, 514455, tzinfo=timezone.utc)\n    now = convert_timestamp_to_datetime(1609867371.514455)\n\n    stat_info = {\n        \"device_id\": DEVICE_ID,\n        \"datetime\": now,\n        \"statistic_type\": StatisticTypeEnum.ALERT,\n        \"people_with_mask\": people_with_mask,\n        \"people_without_mask\": people_without_mask,\n        \"people_total\": people_with_mask + people_without_mask,\n    }\n\n    statistic = create_statistic(\n        db_session=database_session, statistic_information=stat_info\n    )\n\n    assert statistic.device_id == DEVICE_ID\n    assert statistic.datetime == now.replace(tzinfo=None)\n    assert statistic.statistic_type == StatisticTypeEnum.ALERT\n    assert statistic.people_with_mask == people_with_mask\n    assert statistic.people_without_mask == people_without_mask\n    assert statistic.people_total == people_with_mask + people_without_mask\n\n\ndef test_get_statistic():\n    people_with_mask = 4\n    people_without_mask = 7\n    # now = datetime(2021, 1, 4, 17, 22, 51, 514455, tzinfo=timezone.utc)\n    now = convert_timestamp_to_datetime(1609780971.514455)\n\n    statistic = get_statistic(\n        db_session=database_session, device_id=DEVICE_ID, datetime=now\n    )\n\n    assert statistic.device_id == DEVICE_ID\n    assert statistic.datetime == now.replace(tzinfo=None)\n    assert statistic.statistic_type == StatisticTypeEnum.ALERT\n    assert statistic.people_with_mask == people_with_mask\n    assert statistic.people_without_mask == people_without_mask\n    assert statistic.people_total == people_with_mask + people_without_mask\n\n\ndef test_update_statistic():\n    people_without_mask = 7\n    # now = datetime(2021, 1, 4, 17, 22, 51, 514455, tzinfo=timezone.utc)\n    now = convert_timestamp_to_datetime(1609780971.514455)\n\n    statistic = update_statistic(\n    
    db_session=database_session,\n        device_id=DEVICE_ID,\n        datetime=now,\n        new_statistic_information={\"people_with_mask\": 20, \"people_total\": 27},\n    )\n\n    assert statistic.device_id == DEVICE_ID\n    assert statistic.datetime == now.replace(tzinfo=None)\n    assert statistic.statistic_type == StatisticTypeEnum.ALERT\n    assert statistic.people_with_mask == 20\n    assert statistic.people_without_mask == people_without_mask\n    assert statistic.people_total == 27\n\n\ndef test_delete_statistic():\n    people_with_mask = 20\n    people_without_mask = 7\n    # now = datetime(2021, 1, 4, 17, 22, 51, 514455, tzinfo=timezone.utc)\n    now = convert_timestamp_to_datetime(1609780971.514455)\n\n    statistic = delete_statistic(\n        db_session=database_session,\n        device_id=DEVICE_ID,\n        datetime=now,\n    )\n\n    assert statistic.device_id == DEVICE_ID\n    assert statistic.datetime == now.replace(tzinfo=None)\n    assert statistic.statistic_type == StatisticTypeEnum.ALERT\n    assert statistic.people_with_mask == people_with_mask\n    assert statistic.people_without_mask == people_without_mask\n    assert statistic.people_total == people_with_mask + people_without_mask\n\n\ndef test_delete_device():\n    device = delete_device(db_session=database_session, device_id=DEVICE_ID)\n\n    assert device.id == DEVICE_ID\n    assert device.description == \"new description\"\n\n\ndef test_get_deleted_device():\n    with pytest.raises(NoResultFound):\n        get_device(db_session=database_session, device_id=DEVICE_ID)\n\n\ndef test_update_deleted_device():\n    with pytest.raises(NoResultFound):\n        update_device(\n            db_session=database_session,\n            device_id=DEVICE_ID,\n            new_device_information={\"description\": \"new test description\"},\n        )\n\n\ndef test_get_devices():\n    devices = get_devices(db_session=database_session)\n\n    assert devices == []\n\n\ndef test_get_devices():\n    stats = get_statistics(db_session=database_session)\n\n    assert stats == []\n\n\ndatabase_session.close()\n"
  },
  {
    "path": "server/backend.env.template",
    "content": "MQTT_BROKER=mosquitto\nMQTT_BROKER_PORT=1883\nSUBSCRIBER_CLIENT_ID=server_backend\nMQTT_HELLO_TOPIC=hello\nMQTT_ALERT_TOPIC=alerts\nMQTT_REPORT_TOPIC=receive-from-jetson\nMQTT_SEND_TOPIC=send-to-jetson\n"
  },
  {
    "path": "server/build_docker.sh",
    "content": "#!/bin/bash\nsudo docker-compose build\nsudo docker-compose up -d\n"
  },
  {
    "path": "server/database.env.template",
    "content": "POSTGRES_USER=<DATABASE_USER>\nPOSTGRES_PASSWORD=<DATABASE_PASSWORD>\nPOSTGRES_PORT=5432\nPOSTGRES_DB=<DATABASE_NAME>\n"
  },
  {
    "path": "server/docker-compose.yml",
    "content": "version: '3.1'\nservices:\n\n  db:\n    image: postgres:13.2\n    env_file:\n      - database.env\n    volumes:\n      - pgdata:/var/lib/postgresql/data\n    ports:\n      - 5432:5432\n\n  mosquitto:\n    image: eclipse-mosquitto:1.6.13\n    ports:\n      - 1883:1883\n      - 8883:8883\n    volumes:\n      - mosquitto-data:/mosquitto/data\n      - mosquitto-logs:/mosquitto/logs\n      - mosquitto-conf:/mosquitto/config\n      - ./mosquitto.conf:/mosquitto/config/mosquitto.conf\n    restart: unless-stopped\n\n  backend:\n    image: backend\n    volumes:\n      - ./backend:/app\n    build:\n      context: backend\n    depends_on:\n      - db\n    ports:\n      - 80:80\n    command: bash -c \"sleep 5 && /start-reload.sh\"\n    env_file:\n      - backend.env\n      - database.env\n\n  streamlit:\n    build: frontend\n    command: \"streamlit run main.py\"\n    ports:\n      - \"8501:8501\"\n    volumes:\n      - \"./frontend:/usr/src/app\"\n    env_file:\n      - frontend.env\n    depends_on:\n      - db\n      - backend\n    links:\n      - backend\n\nvolumes:\n  pgdata:\n  mosquitto-data:\n  mosquitto-logs:\n  mosquitto-conf:\n"
  },
  {
    "path": "server/frontend/Dockerfile",
    "content": "FROM python:3.7\n\nEXPOSE 8501\n\nWORKDIR /usr/src/app\n\nCOPY requirements.txt ./\n\nRUN pip install -r requirements.txt\n\nCOPY . .\n"
  },
  {
    "path": "server/frontend/main.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport os\nimport json\n\nimport streamlit as st\n\nfrom datetime import datetime, time, timezone\nfrom session_manager import get_state\nfrom utils.api_utils import (\n    get_device,\n    get_devices,\n    get_statistics_from_to,\n    get_device_files,\n)\nfrom utils.format_utils import create_chart, format_data\n\nfrom paho.mqtt import client as mqtt_client\n\nMQTT_BROKER = os.environ[\"MQTT_BROKER\"]\nMQTT_BROKER_PORT = int(os.environ[\"MQTT_BROKER_PORT\"])\nMQTT_CLIENT_ID = os.environ[\"MQTT_CLIENT_ID\"]\n\nMQTT_TOPIC_COMMANDS = \"commands\"\nMQTT_TOPIC_STATUS = \"device-status\"\n\nCMD_FILE_SAVE = \"save_file\"\nCMD_STREAMING_START = \"streaming_start\"\nCMD_STREAMING_STOP = \"streaming_stop\"\nCMD_INFERENCE_RESTART = \"inference_restart\"\nCMD_FILESERVER_RESTART = \"fileserver_restart\"\nCMD_REQUEST_STATUS = \"status_request\"\n\nstate = get_state()\n\n\ndef display_sidebar(all_devices, state):\n    \"\"\"\n    Display sidebar information.\n    \"\"\"\n    st.sidebar.subheader(\"Device selection\")\n    state.selected_device = st.sidebar.selectbox(\n        \"Selected device\",\n        all_devices,\n        index=all_devices.index(state.selected_device if state.selected_device else None),\n    )\n\n    st.sidebar.subheader(\"Filters\")\n    state.date_filter = st.sidebar.date_input(\n        \"From date - To date\",\n        (\n            state.date_filter[0]\n            if state.date_filter and len(state.date_filter) == 2\n            else datetime.now(timezone.utc),\n            state.date_filter[1]\n            if state.date_filter and len(state.date_filter) == 2\n            else datetime.now(timezone.utc),\n        ),\n    )\n    first_column, second_column = st.sidebar.beta_columns(2)\n    state.from_time = first_column.time_input(\n        \"From time\", state.from_time if state.from_time else time(0, 0)\n    )\n    state.to_time = second_column.time_input(\n        \"To time\", state.to_time if state.to_time else time(23, 45)\n    )\n\n    state.group_data_by = st.sidebar.selectbox(\n        \"Group data by\",\n        [\"Second\", \"Minute\", \"Hour\", \"Day\", \"Week\", \"Month\"],\n        index=2,\n    )\n\n    state.show_only_one_chart = 
st.sidebar.checkbox(\"Show only one chart\", value=True)\n\n\ndef display_device(state):\n    \"\"\"\n    Display specific device information.\n    \"\"\"\n    selected_device = state.selected_device\n    device = get_device(selected_device)\n\n    if device is None:\n        st.write(\"Seems that something went wrong while getting the device information.\")\n    else:\n        st.header(f\"Device: {device['id']}\")\n\n        if state.mqtt_last_status:\n            status = state.mqtt_last_status  # shortcut\n            if not status[\"device_address\"]:\n                st.write(\n                    \":warning: **Set MASKCAM_DEVICE_ADDRESS on the device to enable \"\n                    \"streaming and file download links**\"\n                )\n            device_status = st.beta_container()\n            col1, col2 = device_status.beta_columns(2)\n            col1.write(\"🟢 Device connected \" f\"*(Last update: {status['time']})*\")\n            if not status[\"streaming_address\"] or status[\"streaming_address\"] == \"N/A\":\n                col2.write(\":red_circle: Streaming is stopped\")\n            else:\n                if status[\"device_address\"]:\n                    col2.write(\n                        f\"🟢 <a href=\\\"{status['streaming_address']}\\\" target=\\\"_blank\\\">\"\n                        \"Streaming enabled</a>\",\n                        unsafe_allow_html=True,\n                    )\n                else:\n                    col2.write(\n                        \"🟢 Streaming enabled (unknown device address)\",\n                    )\n            device_status.write(\n                f\"**Save videos: {status['save_current_files']}**\"\n                f\" | *Inference runtime: {status['inference_runtime']}*\"\n                f\" | *Fileserver runtime: {status['fileserver_runtime']}*\"\n            )\n\n        mqtt_status = st.empty()  # Might be changed in real time during connection\n        if not state.mqtt_last_status:\n            if not state.mqtt_status:\n                # Loading page, first connection attempt to device\n                send_mqtt_command(device[\"id\"], CMD_REQUEST_STATUS, mqtt_status)\n            else:\n                mqtt_set_status(mqtt_status, state.mqtt_status)\n\n        cols = st.beta_columns(6)\n        # Buttons from right to left\n        with cols.pop():\n            if st.button(\"Restart Deepstream\"):\n                send_mqtt_command(device[\"id\"], CMD_INFERENCE_RESTART, mqtt_status)\n        with cols.pop():\n            if st.button(\"Restart file server\"):\n                send_mqtt_command(device[\"id\"], CMD_FILESERVER_RESTART, mqtt_status)\n        with cols.pop():\n            if st.button(\"Stop streaming\"):\n                send_mqtt_command(device[\"id\"], CMD_STREAMING_STOP, mqtt_status)\n        with cols.pop():\n            if st.button(\"Start streaming\"):\n                send_mqtt_command(device[\"id\"], CMD_STREAMING_START, mqtt_status)\n        with cols.pop():\n            if st.button(\"Save a video\"):\n                send_mqtt_command(device[\"id\"], CMD_FILE_SAVE, mqtt_status)\n        with cols.pop():\n            if st.button(\"Refresh status\"):\n                send_mqtt_command(device[\"id\"], CMD_REQUEST_STATUS, mqtt_status)\n\n        st.header(\"Reported statistics\")\n        device_statistics = None\n        date_filter = state.date_filter\n\n        if len(date_filter) == 2:\n            datetime_from = f\"{date_filter[0]}T{state.from_time}\"\n            datetime_to = 
f\"{date_filter[1]}T{state.to_time}\"\n            device_statistics = get_statistics_from_to(selected_device, datetime_from, datetime_to)\n\n        if not device_statistics:\n            st.write(\"The selected device has no statistics to show for the given filters.\")\n        else:\n            reports, alerts = format_data(device_statistics, state.group_data_by)\n\n            if state.show_only_one_chart:\n                complete_chart = create_chart(reports=reports, alerts=alerts)\n                st.plotly_chart(complete_chart, use_container_width=True)\n            else:\n                st.subheader(\"Reports\")\n                if reports:\n                    report_chart = create_chart(reports=reports)\n                    st.plotly_chart(report_chart, use_container_width=True)\n                else:\n                    st.write(\"The selected device has no reports to show for the given filters.\")\n\n                st.subheader(\"Alerts\")\n                if alerts:\n                    alerts_chart = create_chart(alerts=alerts)\n                    st.plotly_chart(alerts_chart, use_container_width=True)\n                else:\n                    st.write(\"The selected device has no alerts to show for the given filters.\")\n        device_files = get_device_files(device_id=selected_device)\n        st.subheader(\"Saved video files on device\")\n        if not device_files:\n            st.write(\"The selected device has no saved files yet\")\n        else:\n            server_address = None\n            if not state.mqtt_last_status:\n                st.write(\":warning: **Downloads will fail since device is NOT connected**\")\n            elif state.mqtt_last_status[\"device_address\"] is None:\n                st.write(\n                    \":warning: **Set MASKCAM_DEVICE_ADDRESS on device to enable download links**\"\n                )\n            else:  # file_server_address is valid when device_address=MASKCAM_DEVICE_ADDRESS is set\n                server_address = f\"{device['file_server_address']}\"\n            for file_instance in device_files:\n                if server_address:\n                    url = f\"{server_address}/{file_instance['video_name']}\"\n                    st.markdown(f\"[{file_instance['video_name']}]({url})\")\n                else:\n                    st.markdown(f\"{file_instance['video_name']}\")\n\n\ndef mqtt_set_status(mqtt_status, text):\n    state.mqtt_status = text\n    mqtt_status.empty()\n    mqtt_status.markdown(f\"**MQTT status:** {text}\")\n\n\ndef _on_connect(client, userdata, flags, rc):\n    if rc == 0:\n        state.mqtt_connected = True\n\n\ndef _on_message(client, userdata, msg):\n    # This is the only topic the frontend subscribes to\n    assert msg.topic == MQTT_TOPIC_STATUS\n    if not state.selected_device:\n        return\n\n    message = json.loads(msg.payload.decode())\n    if message[\"device_id\"] != state.selected_device:\n        return\n\n    state.mqtt_last_status = message\n\n\n@st.cache(allow_output_mutation=True)\ndef restore_client():\n    client = mqtt_client.Client(MQTT_CLIENT_ID)\n    client.connect(MQTT_BROKER, MQTT_BROKER_PORT)\n    return client\n\n\ndef get_mqtt_client():\n    client = restore_client()\n    client.on_connect = _on_connect\n    client.on_message = _on_message\n    return client\n\n\ndef mqtt_wait_connection(client, timeout):\n    while not state.mqtt_connected and timeout:\n        client.loop(timeout=1)\n        timeout -= 1\n\n\ndef mqtt_wait_response(client, timeout):\n   
 while not state.mqtt_last_status and timeout:\n        client.loop(timeout=1)\n        timeout -= 1\n\n\ndef send_mqtt_message_wait_response(topic, message, mqtt_status):\n    # This function connects, sends a message and waits for the reply. Reconnects as needed\n    try:\n        client = get_mqtt_client()\n        if not state.mqtt_connected:\n            mqtt_set_status(mqtt_status, \"Connecting...\")\n            mqtt_wait_connection(client, 5)\n\n        state.mqtt_last_status = None  # Reset status to await updated response\n\n        # Since we're not running the client.loop() permanently,\n        # the way to ensure that the MQTT client is connected is to try\n        # to send a message and if it fails, try reconnecting.\n        retry_publish = 2  # mosquitto disconnects after a while so at least use 2 here\n        while retry_publish:\n            retry_publish -= 1\n            client.subscribe(MQTT_TOPIC_STATUS)  # Must be done after reconnection\n            msg_info = client.publish(topic, json.dumps(message))\n            mqtt_set_status(mqtt_status, \"Sending message...\")\n\n            timeout = 5\n            while not msg_info.rc and not msg_info.is_published() and timeout:\n                client.loop(1)\n                timeout -= 1\n\n            if msg_info.is_published():\n                mqtt_set_status(mqtt_status, \":clock3: Waiting device response...\")\n                retry_publish = 0  # Success: exit retry loop\n            elif msg_info.rc:\n                state.mqtt_connected = False\n                mqtt_set_status(mqtt_status, \":clock3: Reconnecting...\")\n                client.reconnect()\n                mqtt_wait_connection(client, 5)\n\n        if not msg_info.is_published():\n            mqtt_set_status(mqtt_status, \":o: Message failed\")\n            return\n\n        mqtt_wait_response(client, 5)\n        if not state.mqtt_last_status:\n            mqtt_set_status(mqtt_status, \":red_circle: Device not responding\")\n\n    except Exception as e:\n        mqtt_set_status(mqtt_status, f\":red_circle: Could not connect to MQTT broker: {e}\")\n\n\ndef send_mqtt_command(device_id, command, mqtt_status):\n    send_mqtt_message_wait_response(\n        MQTT_TOPIC_COMMANDS, {\"device_id\": device_id, \"command\": command}, mqtt_status\n    )\n\n\ndef main():\n    st.set_page_config(page_title=\"Maskcam\")\n\n    st.title(\"MaskCam dashboard\")\n    all_devices = get_devices()\n    display_sidebar(all_devices, state)\n\n    if state.selected_device is None:\n        st.write(\"Please select a device.\")\n    else:\n        display_device(state)\n\n    state.sync()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "server/frontend/requirements.txt",
    "content": "streamlit==0.74.1\nrequests==2.25.1\nplotly==4.14.3\npaho-mqtt==1.5.1\n"
  },
  {
    "path": "server/frontend/session_manager.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport streamlit as st\nfrom streamlit.hashing import _CodeHasher\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.server.server import Server\n\n\nclass _SessionState:\n    def __init__(self, session, hash_funcs):\n        \"\"\"\n        Initialize SessionState instance.\n        \"\"\"\n        self.__dict__[\"_state\"] = {\n            \"data\": {},\n            \"hash\": None,\n            \"hasher\": _CodeHasher(hash_funcs),\n            \"is_rerun\": False,\n            \"session\": session,\n        }\n\n    def __call__(self, **kwargs):\n        \"\"\"\n        Initialize state data once.\n        \"\"\"\n        for item, value in kwargs.items():\n            if item not in self._state[\"data\"]:\n                self._state[\"data\"][item] = value\n\n    def __getitem__(self, item):\n        \"\"\"\n        Return a saved state value, None if item is undefined.\n        \"\"\"\n        return self._state[\"data\"].get(item, None)\n\n    def __getattr__(self, item):\n        \"\"\"\n        Return a saved state value, None if item is undefined.\n        \"\"\"\n        return self._state[\"data\"].get(item, None)\n\n    def __setitem__(self, item, value):\n        \"\"\"Set state value.\"\"\"\n        self._state[\"data\"][item] = value\n\n    def __setattr__(self, item, value):\n        \"\"\"\n        Set state value.\n        \"\"\"\n        self._state[\"data\"][item] = value\n\n    def clear(self):\n        \"\"\"\n        Clear session state and request a rerun.\n        \"\"\"\n        self._state[\"data\"].clear()\n        self._state[\"session\"].request_rerun()\n\n    def sync(self):\n        \"\"\"\n        Rerun the app with all state values up to date from the beginning to fix rollbacks.\n        \"\"\"\n\n        # Ensure to rerun only once to avoid infinite loops\n        # caused by a constantly changing state value at each run.\n        #\n        # Example: state.value += 1\n        if self._state[\"is_rerun\"]:\n            self._state[\"is_rerun\"] = False\n\n        elif self._state[\"hash\"] is not None:\n            if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n                self._state[\"data\"], 
None\n            ):\n                self._state[\"is_rerun\"] = True\n                self._state[\"session\"].request_rerun()\n\n        self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(\n            self._state[\"data\"], None\n        )\n\n\ndef _get_session():\n    session_id = get_report_ctx().session_id\n    session_info = Server.get_current()._get_session_info(session_id)\n\n    if session_info is None:\n        raise RuntimeError(\"Couldn't get your Streamlit Session object.\")\n\n    return session_info.session\n\n\ndef get_state(hash_funcs=None):\n    session = _get_session()\n\n    if not hasattr(session, \"_custom_session_state\"):\n        session._custom_session_state = _SessionState(session, hash_funcs)\n\n    return session._custom_session_state\n"
  },
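  {
    "path": "server/frontend/example_session_state.py",
    "content": "\"\"\"\nMinimal usage sketch for session_manager.get_state() (hypothetical example\nfile, not part of the original sources; the relative import assumes it runs\nfrom server/frontend/). The state object supports one-time initialization via\n__call__, plain attribute get/set, and sync() at the end of each script run\nto persist values across Streamlit reruns.\n\"\"\"\nimport streamlit as st\n\nfrom session_manager import get_state\n\nstate = get_state()\nstate(counter=0)  # Initialize default values only once per session\n\nif st.button(\"Increment\"):\n    state.counter += 1\n\nst.write(f\"Counter value: {state.counter}\")\n\n# Persist the state and rerun once if values changed during this run\nstate.sync()\n"
  },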
  {
    "path": "server/frontend/utils/api_utils.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport json\nimport os\nfrom typing import Dict, List\n\nimport requests\n\nSERVER_URL = os.environ[\"SERVER_URL\"]\n\n\ndef get_devices():\n    \"\"\"\n    Get all devices.\n    \"\"\"\n    response = requests.get(f\"http://{SERVER_URL}/devices\")\n\n    devices_json = json.loads(response.content) if response.ok else []\n\n    devices = [None]\n    devices.extend([device[\"id\"] for device in devices_json])\n    return devices\n\n\ndef get_device(device_id: str):\n    \"\"\"\n    Get specific device.\n\n    Arguments:\n        device_id {str} -- Device id.\n    \"\"\"\n    response = requests.get(f\"http://{SERVER_URL}/devices/{device_id}\")\n\n    return json.loads(response.content) if response.ok else None\n\n\ndef get_statistics_from_to(device_id, datetime_from, datetime_to):\n    \"\"\"\n    Get statistics from a specific device within a datetime range.\n\n    Arguments:\n        device_id {str} -- Device id.\n        datetime_from {str} -- Datetime from.\n        datetime_to {str} -- Datetime to.\n    \"\"\"\n    response = requests.get(\n        f\"http://{SERVER_URL}/devices/{device_id}/statistics?datefrom={datetime_from}&dateto={datetime_to}\"\n    )\n\n    return json.loads(response.content) if response.ok else None\n\ndef get_device_files(device_id):\n    \"\"\"\n    Get files from a specific device\n\n    Arguments:\n        device_id {str} -- Device id.\n    \"\"\"\n    response = requests.get(\n        f\"http://{SERVER_URL}/files/{device_id}\"\n    )\n\n    return json.loads(response.content) if response.ok else None\n\n"
  },
  {
    "path": "server/frontend/utils/format_utils.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nfrom typing import Dict, List\n\nimport pandas as pd\nimport numpy as np\n\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\n\ndef format_data(statistics: List = [], group_data_by: str = None):\n    \"\"\"\n    Format data to be displayed in the carts.\n\n    Arguments:\n        statistics {List} -- All device statistics.\n        group_data_by {str} -- User selected aggregation option.\n    \"\"\"\n    reports, alerts = {}, {}\n\n    for statistic in statistics:\n        # Separate reports from alerts\n        if statistic[\"statistic_type\"] == \"REPORT\":\n            if not reports:\n                reports = create_statistics_dict()\n\n            reports = add_information(reports, statistic)\n        else:\n            if not alerts:\n                alerts = create_statistics_dict()\n\n            alerts = add_information(alerts, statistic)\n\n    # Aggregate data\n    if reports:\n        reports = group_data(reports, group_data_by)\n\n    if alerts:\n        alerts = group_data(alerts, group_data_by)\n\n    return reports, alerts\n\n\ndef group_data(data: Dict, group_data_by: str):\n    \"\"\"\n    Aggregate data using different criteria.\n\n    Arguments:\n        data {Dict} -- Data to aggregate.\n        group_data_by {str} -- User selected aggregation option.\n    \"\"\"\n    data_df = pd.DataFrame.from_dict(data)\n    data_df[\"dates\"] = pd.to_datetime(data_df[\"dates\"])\n\n    criterion = \"H\"\n    if group_data_by == \"Second\":\n        criterion = \"S\"\n    elif group_data_by == \"Minute\":\n        criterion = \"T\"\n    elif group_data_by == \"Day\":\n        criterion = \"D\"\n    elif group_data_by == \"Week\":\n        criterion = \"W\"\n    elif group_data_by == \"Month\":\n        criterion = \"M\"\n\n    group = (\n        data_df.resample(criterion, on=\"dates\")\n        .agg(\n            {\n                \"people_total\": \"sum\",\n                \"people_with_mask\": \"sum\",\n                \"people_without_mask\": \"sum\",\n            }\n        )\n        .reset_index()\n    )\n\n    # Calculate new mask percentage\n    group[\"mask_percentage\"] = (\n        group[\"people_with_mask\"] * 100 / 
group[\"people_total\"]\n    )\n    group[\"mask_percentage\"] = group[\"mask_percentage\"].replace(\n        [np.inf, -np.inf], 0\n    )\n\n    group[\"visible_people\"] = (\n        group[\"people_with_mask\"] + group[\"people_without_mask\"]\n    )\n\n    # Drop empty lines\n    group.dropna(subset=[\"mask_percentage\"], inplace=True)\n    group = group.to_dict()\n    grouped_data = {\n        \"dates\": [t.to_pydatetime() for t in group[\"dates\"].values()],\n        \"people_with_mask\": list(group[\"people_with_mask\"].values()),\n        \"people_total\": list(group[\"people_total\"].values()),\n        \"mask_percentage\": list(group[\"mask_percentage\"].values()),\n        \"visible_people\": list(group[\"visible_people\"].values()),\n    }\n\n    return grouped_data\n\n\ndef create_statistics_dict():\n    \"\"\"\n    Chart data structure.\n    \"\"\"\n    return {\n        \"dates\": [],\n        \"mask_percentage\": [],\n        \"people_total\": [],\n        \"people_with_mask\": [],\n        \"people_without_mask\": [],\n        \"visible_people\": [],\n    }\n\n\ndef add_information(statistic_dict: Dict, statistic_information: Dict):\n    \"\"\"\n    Add information to existing dict.\n\n    Arguments:\n        statistic_dict {Dict} -- Existing dict.\n        statistic_information {Dict} -- Data to add.\n    \"\"\"\n    statistic_dict[\"dates\"].append(statistic_information[\"datetime\"])\n\n    total = statistic_information[\"people_total\"]\n    people_with_mask = statistic_information[\"people_with_mask\"]\n    people_without_mask = statistic_information[\"people_without_mask\"]\n\n    statistic_dict[\"people_total\"].append(total)\n    statistic_dict[\"people_with_mask\"].append(people_with_mask)\n    statistic_dict[\"people_without_mask\"].append(people_without_mask)\n\n    mask_percentage = (\n        statistic_information[\"people_with_mask\"] * 100 / total\n        if total != 0\n        else 0\n    )\n    statistic_dict[\"mask_percentage\"].append(mask_percentage)\n\n    statistic_dict[\"visible_people\"].append(\n        people_with_mask + people_without_mask\n    )\n\n    return statistic_dict\n\n\ndef create_chart(reports: Dict = {}, alerts: Dict = {}):\n    \"\"\"\n    Create Plotly chart.\n\n    Arguments:\n        reports {Dict} -- Reports data.\n        alerts {Dict} -- Alerts data.\n    \"\"\"\n\n    # Create figure with secondary y-axis\n    figure = make_subplots(specs=[[{\"secondary_y\": True}]])\n\n    if reports:\n        report_colors = {\n            \"people_total\": \"darkslategray\",\n            \"people_with_mask\": \"cadetblue\",\n            \"mask_percentage\": \"limegreen\",\n            \"visible_people\": \"royalblue\",\n        }\n        figure = add_trace(reports, figure, report_colors, trace_type=\"report\")\n\n    if alerts:\n        alert_colors = {\n            \"people_total\": \"darkred\",\n            \"people_with_mask\": \"salmon\",\n            \"mask_percentage\": \"orange\",\n            \"visible_people\": \"indianred\",\n        }\n        figure = add_trace(alerts, figure, alert_colors, trace_type=\"alert\")\n\n    # Set x-axis title\n    figure.update_xaxes(title_text=\"Datetime\")\n\n    # Set y-axes titles\n    figure.update_yaxes(title_text=\"Number of people\", secondary_y=False)\n    figure.update_yaxes(\n        title_text=\"Mask Percentage\", secondary_y=True, rangemode=\"tozero\"\n    )\n    figure.update_layout(\n        xaxis_tickformat=\"%H:%M:%S <br> %Y/%d/%m\",\n        barmode=\"group\",\n        bargap=0.4,\n 
       bargroupgap=0.1,\n        legend={\n            \"orientation\": \"h\",\n            \"yanchor\": \"bottom\",\n            \"y\": 1.02,\n            \"xanchor\": \"right\",\n            \"x\": 1,\n        },\n        margin=dict(t=50, b=30, r=30),\n        font=dict(\n            size=10,\n        ),\n    )\n\n    return figure\n\n\ndef add_trace(trace_information: Dict, figure, colors: Dict, trace_type=\"\"):\n    \"\"\"\n    Add new trace to Plotly chart.\n\n    Arguments:\n        trace_information {Dict} -- Trace to add.\n        figure -- Plotly chart.\n        colors {Dict} -- Colors to use in the new trace.\n        trace_type -- Alert or Report.\n    \"\"\"\n    figure.add_trace(\n        go.Bar(\n            x=trace_information[\"dates\"],\n            y=trace_information[\"people_total\"],\n            name=\"People\" if not trace_type else f\"People {trace_type}\",\n            marker_color=colors[\"people_total\"],\n        ),\n        secondary_y=False,\n    )\n\n    figure.add_trace(\n        go.Bar(\n            x=trace_information[\"dates\"],\n            y=trace_information[\"people_with_mask\"],\n            name=\"Masks\" if not trace_type else f\"Masks {trace_type}\",\n            marker_color=colors[\"people_with_mask\"],\n        ),\n        secondary_y=False,\n    )\n\n    figure.add_trace(\n        go.Bar(\n            x=trace_information[\"dates\"],\n            y=trace_information[\"visible_people\"],\n            name=\"Visible\" if not trace_type else f\"Visible {trace_type}\",\n            marker_color=colors[\"visible_people\"],\n        ),\n        secondary_y=False,\n    )\n\n    figure.add_trace(\n        go.Scatter(\n            x=trace_information[\"dates\"],\n            y=trace_information[\"mask_percentage\"],\n            name=\"Mask %\" if not trace_type else f\"Mask % {trace_type}\",\n            marker_color=colors[\"mask_percentage\"],\n        ),\n        secondary_y=True,\n    )\n\n    return figure\n"
  },
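  {
    "path": "server/frontend/example_chart.py",
    "content": "\"\"\"\nEnd-to-end sketch of the statistics-to-chart pipeline (hypothetical example\nfile, not part of the original sources; import paths and the sample records\nare assumptions). Each statistic dict must carry statistic_type (\"REPORT\" or\nan alert type), datetime, people_total, people_with_mask and\npeople_without_mask, which is the schema consumed by format_data() and\nadd_information().\n\"\"\"\nfrom utils.format_utils import create_chart, format_data\n\n# Two fake REPORT records, shaped like the backend statistics endpoint output\nstatistics = [\n    {\n        \"statistic_type\": \"REPORT\",\n        \"datetime\": \"2021-03-01T12:00:00\",\n        \"people_total\": 10,\n        \"people_with_mask\": 7,\n        \"people_without_mask\": 3,\n    },\n    {\n        \"statistic_type\": \"REPORT\",\n        \"datetime\": \"2021-03-01T13:30:00\",\n        \"people_total\": 4,\n        \"people_with_mask\": 1,\n        \"people_without_mask\": 3,\n    },\n]\n\n# Resample per hour (any unknown option falls back to the hourly criterion)\nreports, alerts = format_data(statistics, group_data_by=\"Hour\")\nfigure = create_chart(reports, alerts)\nfigure.show()\n"
  },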
  {
    "path": "server/frontend.env.template",
    "content": "SERVER_URL=backend\nMQTT_BROKER=mosquitto\nMQTT_BROKER_PORT=1883\nMQTT_CLIENT_ID=server_frontend"
  },
  {
    "path": "server/mosquitto.conf",
    "content": "bind_address 0.0.0.0"
  },
  {
    "path": "server/server_setup.sh",
    "content": "#!/bin/bash\nsudo apt-get update\nsudo apt install docker.io -y\nsudo systemctl start docker\nsudo curl -L \"https://github.com/docker/compose/releases/download/1.27.4/docker-compose-$(uname -s)-$(uname -m)\" -o /usr/local/bin/docker-compose\nsudo chmod +x /usr/local/bin/docker-compose\n\nsudo bash ./build_docker.sh\n"
  },
  {
    "path": "server/stop_docker.sh",
    "content": "#!/bin/bash\nsudo docker-compose down\n"
  },
  {
    "path": "utils/combine_coco.py",
    "content": "# %%\nimport json\nimport sys\n\n\n# %%\ndef merge_2_into_1(json1, json2):\n    # ID offsets to put annotations and images from json2 into json1\n    annotations1 = json1[\"annotations\"]\n    new_id_ann = 1 + max(*[ann[\"id\"] for ann in annotations1])\n\n    images1 = json1[\"images\"]\n    new_id_im = 1 + max(*[im[\"id\"] for im in images1])\n\n    categories1 = json1[\"categories\"]\n    categories2 = json2[\"categories\"]\n    annotations2 = json2[\"annotations\"]\n    images2 = json2[\"images\"]\n\n    # Append images2 to images1 and build a map from old to new IDs\n    img_ids_1 = {im1[\"file_name\"]: im1[\"id\"] for im1 in images1}\n    map_im2_to_im1 = {}\n    for im2 in images2:\n        if im2[\"file_name\"] in img_ids_1:  # im2 was already in JSON 1\n            map_im2_to_im1[im2[\"id\"]] = img_ids_1[im2[\"file_name\"]]\n        else:\n            map_im2_to_im1[im2[\"id\"]] = new_id_im\n            im2[\"id\"] = new_id_im\n            images1.append(im2)  # Add to JSON 1\n            new_id_im += 1\n\n    cat_ids_1 = {cat1[\"name\"]: cat1[\"id\"] for cat1 in categories1}\n    map_cat2_to_cat1 = {cat2[\"id\"]: cat_ids_1[cat2[\"name\"]] for cat2 in categories2}\n    print(f\"Cats 1: {categories1}\")\n    print(f\"Cats 2: {categories2}\")\n    print(f\"Map: {map_cat2_to_cat1}\")\n\n    for annotation2 in annotations2:\n        annotation2[\"image_id\"] = map_im2_to_im1[annotation2[\"image_id\"]]\n        annotation2[\"category_id\"] = map_cat2_to_cat1[annotation2[\"category_id\"]]\n        annotation2[\"id\"] = new_id_ann\n        annotations1.append(annotation2)\n        new_id_ann += 1\n\n\ndef print_coco(js):\n    print(\n        f\"Images: {len(js['images'])} \"\n        f\"| Annotations: {len(js['annotations'])} \"\n        f\"| Categories: {len(js['categories'])}\"\n    )\n\n\ndef open_coco(file_name):\n    with open(file_name, \"r\") as f:\n        js = json.load(f)\n        print(f\"Loaded JSON file: {file_name}\")\n        print_coco(js)\n    return js\n\n\n# %%\nfile1 = sys.argv[1]\nother_files = sys.argv[2:]\n\n# file1 = \"../new_dataset/annotations/instances_1_30s.json\"\n# file2 = \"../new_dataset/annotations/instances_2_30s.json\"\n\njson1 = open_coco(file1)\n\nfor file2 in other_files:\n    merge_2_into_1(json1, open_coco(file2))\n\noutput_filename = \"merge_result.json\"\nwith open(output_filename, \"w\") as output_file:\n    json.dump(json1, output_file)\n    print(f\"Saved {output_filename}\")\n    print_coco(json1)\n"
  },
  {
    "path": "utils/gst_capabilities.sh",
    "content": "gst-launch-1.0 --gst-debug=v4l2src:5 v4l2src device=/dev/video0 ! fakesink 2>&1 | sed -une '/caps of src/ s/[:;] /\\n/gp'"
  },
  {
    "path": "utils/mqtt-test/broker.py",
    "content": "import os\nfrom paho.mqtt import client as mqtt_client\n\nMQTT_BROKER = os.environ[\"MQTT_BROKER\"]\nMQTT_BROKER_PORT = 1883\n\n\ndef connect_mqtt_broker(client_id: str) -> mqtt_client:\n    def on_connect(client, userdata, flags, code):\n        if code == 0:\n            print(\"Connected to MQTT Broker\")\n        else:\n            print(f\"Failed to connect, return code {code}\\n\")\n\n    client = mqtt_client.Client(client_id)\n    client.on_connect = on_connect\n    client.connect(MQTT_BROKER, MQTT_BROKER_PORT)\n    return client\n"
  },
  {
    "path": "utils/mqtt-test/publisher.py",
    "content": "import json\nimport random\nimport time\nfrom datetime import datetime, timezone\n\nfrom broker import connect_mqtt_broker\n\n\ndef publish(client):\n    while True:\n        time.sleep(1)\n\n        # Test topic\n        test_topic = \"test\"\n        test_result = client.publish(\n            test_topic,\n            json.dumps(\n                {\n                    \"msg\": f\"testing\",\n                    \"timestamp\": datetime.timestamp(datetime.now(timezone.utc)),\n                }\n            ),\n        )\n\n        if test_result[0] == 0:\n            print(f\"Send test msg to test topic\")\n        else:\n            print(f\"Failed to send message to test topic\")\n\n\ndef run():\n    client = connect_mqtt_broker(client_id=f\"publisher-test\")\n    client.loop_start()\n    publish(client)\n\n\nif __name__ == \"__main__\":\n    run()\n"
  },
  {
    "path": "utils/mqtt-test/suscriber.py",
    "content": "import json\n\nfrom broker import connect_mqtt_broker\nfrom paho.mqtt import client as mqtt_client\n\n\ndef subscribe(client: mqtt_client):\n    def on_message(client, userdata, msg):\n        message = json.loads(msg.payload.decode())\n        topic = msg.topic\n        print(f\"Message received in topic: {topic}\")\n        print(message)\n\n    test_topic = \"test\"\n    client.subscribe(test_topic)\n    client.on_message = on_message\n\n\ndef main():\n    client = connect_mqtt_broker(client_id=f\"subscriber-test\")\n    subscribe(client)\n    client.loop_forever()\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "utils/onnx_fix_mobilenet.py",
    "content": "import sys\nimport onnx_graphsurgeon as gs\nimport onnx\nimport numpy as np\n\n\"\"\"\nThis code is intended to change the uint8 input type (not supported by TensorRT)\nThe input ONNX file can be produced by converting with:\nhttps://github.com/onnx/tensorflow-onnx\n\nAfter conversion, the output ONNX should be able to be converted as:\n/usr/src/tensorrt/bin/trtexec --fp16 --onnx=input_file.onnx --explicitBatch --saveEngine=output_file.trt\nBut that step still fails due to NonMaxSuppression plugin not found (operation not supported by TensorRT)\n\"\"\"\n\ninput_onnx = sys.argv[1]\noutput_onnx = sys.argv[2]\ngraph = gs.import_onnx(onnx.load(input_onnx))\nfor inp in graph.inputs:\n    inp.dtype = np.float32\n\nonnx.save(gs.export_onnx(graph), output_onnx)\n"
  },
  {
    "path": "utils/remove_images_coco.py",
    "content": "# %%\nimport json\nimport sys\n\n\n# %%\ndef merge_2_into_1(json1, json2):\n    # TODO: Remap category IDs according to their name\n    # ID offsets to put annotations and images from json2 into json1\n    annotations1 = json1[\"annotations\"]\n    new_id_ann = 1 + max(*[ann[\"id\"] for ann in annotations1])\n\n    images1 = json1[\"images\"]\n    new_id_im = 1 + max(*[im[\"id\"] for im in images1])\n\n    annotations2 = json2[\"annotations\"]\n    images2 = json2[\"images\"]\n\n    # Append images2 to images1 and build a map from old to new IDs\n    img_ids_1 = {im1[\"file_name\"]: im1[\"id\"] for im1 in images1}\n    map_im2_to_im1 = {}\n    for im2 in images2:\n        if im2[\"file_name\"] in img_ids_1:  # im2 was already in JSON 1\n            map_im2_to_im1[im2[\"id\"]] = img_ids_1[im2[\"file_name\"]]\n        else:\n            map_im2_to_im1[im2[\"id\"]] = new_id_im\n            im2[\"id\"] = new_id_im\n            images1.append(im2)  # Add to JSON 1\n            new_id_im += 1\n\n    for annotation2 in annotations2:\n        annotation2[\"image_id\"] = map_im2_to_im1[annotation2[\"image_id\"]]\n        annotation2[\"id\"] = new_id_ann\n        annotations1.append(annotation2)\n        new_id_ann += 1\n\n\ndef print_coco(js):\n    print(\n        f\"Images: {len(js['images'])} \"\n        f\"| Annotations: {len(js['annotations'])} \"\n        f\"| Categories: {len(js['categories'])}\"\n    )\n\n\ndef open_coco(file_name):\n    with open(file_name, \"r\") as f:\n        js = json.load(f)\n        print(f\"Loaded JSON file: {file_name}\")\n        print_coco(js)\n    return js\n\n\n# %%\nfile1 = sys.argv[1]\nimages_to_remove = sys.argv[2:]\n\njson1 = open_coco(file1)\n\nimages = json1[\"images\"]\nannotations = json1[\"annotations\"]\nnew_images = []\nnew_annotations = []\nimg_ids_to_keep = set()\n\n# Append images2 to images1 and build a map from old to new IDs\nfor im in images:\n    if im[\"file_name\"] not in images_to_remove:\n        img_ids_to_keep.add(im[\"id\"])\n        new_images.append(im)\n\nfor ann in annotations:\n    if ann[\"image_id\"] in img_ids_to_keep:\n        new_annotations.append(ann)\n\njson1[\"images\"] = new_images\njson1[\"annotations\"] = new_annotations\n\noutput_filename = \"clean_result.json\"\nwith open(output_filename, \"w\") as output_file:\n    json.dump(json1, output_file)\n    print(f\"Saved {output_filename}\")\n    print_coco(json1)\n"
  },
  {
    "path": "utils/tf1_trt_inference.py",
    "content": "import tensorflow as tf\nfrom tensorflow.python.compiler.tensorrt import trt_convert\nimport tensorflow.contrib.tensorrt as trt\n\n\n\"\"\"\nCode based on:\n - https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html#using-savedmodel\n - https://github.com/NVIDIA-AI-IOT/tf_trt_models/blob/master/examples/detection/detection.ipynb\n\"\"\"\n\n# converter = trt_convert.TrtGraphConverter(\n#     input_saved_model_dir=input_saved_model_dir,\n#     precision_mode=”FP16”,\n#     maximum_cached_engines=100)\n# converter.convert()\n# converter.save(output_saved_model_dir)\n\n# with tf.Session() as sess:\n#     # First load the SavedModel into the session\n#     tf.saved_model.loader.load(\n#         sess, [tf.saved_model.tag_constants.SERVING],\n#        output_saved_model_dir)\n#     output = sess.run([output_tensor], feed_dict={input_tensor: input_data})\n\ninput_name = [\"image_tensor\"]\noutput_names = [\n    \"detection_boxes\",\n    \"detection_classes\",\n    \"detection_scores\",\n    \"num_detections\",\n]\n\ntf_config.gpu_options.allow_growth = True\ntf_sess = tf.Session(config=tf_config)\ntf.import_graph_def(trt_graph, name=\"\")\n\ntf_input = tf_sess.graph.get_tensor_by_name(input_names[0] + \":0\")\ntf_scores = tf_sess.graph.get_tensor_by_name(\"detection_scores:0\")\ntf_boxes = tf_sess.graph.get_tensor_by_name(\"detection_boxes:0\")\ntf_classes = tf_sess.graph.get_tensor_by_name(\"detection_classes:0\")\ntf_num_detections = tf_sess.graph.get_tensor_by_name(\"num_detections:0\")\n\ntrt_graph = trt.create_inference_graph(\n    input_graph_def=frozen_graph,\n    outputs=output_names,\n    max_batch_size=1,\n    max_workspace_size_bytes=1 << 25,\n    precision_mode=\"FP16\",\n    minimum_segment_size=50,\n)\n"
  },
  {
    "path": "utils/tf2_trt_convert.py",
    "content": "import sys\nimport tensorflow as tf\nfrom tensorflow.python.compiler.tensorrt import trt_convert as trt\n\ninput_saved_model_dir = sys.argv[1]\noutput_saved_model_dir = sys.argv[2]\n\n\"\"\"\nThis code is based on:\n - https://docs.nvidia.com/deeplearning/frameworks/tf-trt-user-guide/index.html#worflow-with-savedmodel\n - https://sayak.dev/tf.keras/tensorrt/tensorflow/2020/07/01/accelerated-inference-trt.html\n\nCurrently fails after a while (jetson, tensorflow==2.3.1) with:\n\n2020-11-27 16:32:23.116411: W tensorflow/core/framework/op_kernel.cc:1767] OP_REQUIRES failed at trt_engine_resource_ops.cc:196 : Not found: Container TF-TRT does not exist. (Could not find resource: TF-TRT/TRTEngineOp_0_0)\n\"\"\"\n\nconversion_params = trt.DEFAULT_TRT_CONVERSION_PARAMS\n# conversion_params = conversion_params._replace(max_workspace_size_bytes=(1 << 32))\nconversion_params = conversion_params._replace(precision_mode=\"FP16\")\nconversion_params = conversion_params._replace(maximum_cached_engines=100)\n\nconverter = trt.TrtGraphConverterV2(\n    input_saved_model_dir=input_saved_model_dir, conversion_params=conversion_params\n)\nconverter.convert()\n\nconverter.save(output_saved_model_dir)\n\n\n# saved_model_loaded = tf.saved_model.load(\n#     output_saved_model_dir, tags=[tag_constants.SERVING])\n# graph_func = saved_model_loaded.signatures[\n#     signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n# frozen_func = convert_to_constants.convert_variables_to_constants_v2(\n#     graph_func)\n# output = frozen_func(input_data)[0].numpy()\n"
  },
  {
    "path": "utils/tf_trt_convert.py",
    "content": "# %%\nimport sys\nimport os\nimport time\nimport urllib\nimport matplotlib\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.contrib.tensorrt as trt\n\nmatplotlib.use(\"Agg\")\n\nfrom PIL import Image\n\n# Install this from: https://github.com/NVIDIA-AI-IOT/tf_trt_models\n# Tested on Nano with python 3.6.9 and TF-1.15.4\nfrom tf_trt_models.detection import download_detection_model, build_detection_graph\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patches\n\n# %%\n# inference_graph_path = \"../inference_graph_1024x608\"\n# inference_graph_path = \"../inference_graph_300x300\"\ninference_graph_path = \"../inference_graph_400x708\"\nconfig_path = f\"{inference_graph_path}/pipeline.config\"\ncheckpoint_path = f\"{inference_graph_path}/model.ckpt\"\nbatch_size = 4\nscore_threshold = 0.5  # TODO: Try this, with 0.5 in 300x300 and check results w/slack\n\nfrozen_graph, input_names, output_names = build_detection_graph(\n    config=config_path,\n    checkpoint=checkpoint_path,\n    batch_size=batch_size,\n    score_threshold=score_threshold,\n)\n# %%\n\ntrt_graph = trt.create_inference_graph(\n    input_graph_def=frozen_graph,\n    outputs=output_names,\n    max_batch_size=batch_size,\n    max_workspace_size_bytes=1 << 25,\n    precision_mode=\"FP16\",\n    minimum_segment_size=50,\n)\n\n# %%\nconverted_trt_graph_file = f\"converted_trt_708_400_bs{batch_size}.pb\"\n# %%\nwith open(converted_trt_graph_file, \"wb\") as f:\n    f.write(trt_graph.SerializeToString())\n\n# %%\ntrt_graph = tf.GraphDef()\nwith open(converted_trt_graph_file, \"rb\") as f:\n    trt_graph.ParseFromString(f.read())\n# %%\ninput_names = [\"image_tensor\"]\noutput_names = [\n    \"detection_boxes\",\n    \"detection_classes\",\n    \"detection_scores\",\n    \"num_detections\",\n]\n\n\n# %%\ntf_config = tf.ConfigProto()\ntf_config.gpu_options.allow_growth = True\n\ntf_sess = tf.Session(config=tf_config)\n\ntf.import_graph_def(trt_graph, name=\"\")\n\ntf_input = tf_sess.graph.get_tensor_by_name(input_names[0] + \":0\")\ntf_scores = tf_sess.graph.get_tensor_by_name(\"detection_scores:0\")\ntf_boxes = tf_sess.graph.get_tensor_by_name(\"detection_boxes:0\")\ntf_classes = tf_sess.graph.get_tensor_by_name(\"detection_classes:0\")\ntf_num_detections = tf_sess.graph.get_tensor_by_name(\"num_detections:0\")\n\n# %%\npaths = [\"../yolo/data/obj_train_data/images/hdstock_2_90.jpg\"]\npaths += [\"../yolo/data/obj_train_data/images/1_30s_0.jpg\"]\nimage = Image.open(paths[0])\n\nplt.imshow(image)\n\n# image_resized = np.array(image.resize((1024, 608)))\n# image_resized = np.array(image.resize((300, 300)))\nimage_resized = np.array(image.resize((708, 400)))\nimage = np.array(image)\n\n# %%\nscores, boxes, classes, num_detections = tf_sess.run(\n    [tf_scores, tf_boxes, tf_classes, tf_num_detections],\n    feed_dict={tf_input: np.stack([image_resized] * batch_size)},\n)\n\nboxes = boxes[0]  # index by 0 to remove batch dimension\nscores = scores[0]\nclasses = classes[0]\nnum_detections = num_detections[0]\n\n# %%\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\nax.imshow(image)\n\n# plot boxes exceeding score threshold\nfor i in range(int(num_detections)):\n    # scale box to image coordinates\n    box = boxes[i] * np.array(\n        [image.shape[0], image.shape[1], image.shape[0], image.shape[1]]\n    )\n\n    # display rectangle\n    patch = patches.Rectangle(\n        (box[1], box[0]), box[3] - box[1], box[2] - box[0], color=\"g\", alpha=0.3\n    )\n    ax.add_patch(patch)\n\n    # 
display class index and score\n    plt.text(\n        x=box[1] + 10,\n        y=box[2] - 10,\n        s=\"%d (%0.2f) \" % (classes[i], scores[i]),\n        color=\"w\",\n    )\nplt.savefig(\"detections.png\")\n\n# %%\nnum_samples = 50\n\ninput_batch = np.stack([image_resized] * batch_size)\nt0 = time.time()\nfor i in range(num_samples):\n    scores, boxes, classes, num_detections = tf_sess.run(\n        [tf_scores, tf_boxes, tf_classes, tf_num_detections],\n        feed_dict={tf_input: input_batch},\n    )\nt1 = time.time()\nprint(\"Average runtime: %f seconds\" % (float(t1 - t0) / num_samples))\n\n# %%\ntf_sess.close()\n"
  },
  {
    "path": "yolo/config.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\nimport yaml\n\n\nclass Config:\n    def __init__(self, config_file_path):\n        # Load config file\n        with open(config_file_path, \"r\") as stream:\n            self._config = yaml.load(stream, Loader=yaml.FullLoader)\n\n        # Define colors to be used internally through the app, and also externally if wanted\n        self.colors = {\n            \"green\": (0, 128, 0),\n            \"white\": (255, 255, 255),\n            \"olive\": (0, 128, 128),\n            \"black\": (0, 0, 0),\n            \"navy\": (128, 0, 0),\n            \"red\": (0, 0, 255),\n            \"pink\": (128, 128, 255),\n            \"maroon\": (0, 0, 128),\n            \"grey\": (128, 128, 128),\n            \"purple\": (128, 0, 128),\n            \"yellow\": (0, 255, 255),\n            \"lime\": (0, 255, 0),\n            \"fuchsia\": (255, 0, 255),\n            \"aqua\": (255, 255, 0),\n            \"blue\": (255, 0, 0),\n            \"teal\": (128, 128, 0),\n            \"silver\": (192, 192, 192),\n        }\n\n    def __getitem__(self, name):\n        return self._config[name]\n"
  },
  {
    "path": "yolo/config_images.yml",
    "content": "yolo_generic:\n  detection_threshold: 0.1\n  nms_threshold: 0.1  # Lower values filter more\n  distance_threshold: 1\n\nyolo_trt_tiny:\n  names_file: data/obj.names\n  # engine_file: ../yolo/yolov4-facemask-tiny-fp16.trt\n  # engine_file: ../../tensorrt_batch8_fp16.trt\n  # engine_file: facemask_y4tiny_1024_608_fp16.trt\n  # engine_file: maskcam_y4t_1120_640_fp16.trt\n  engine_file: maskcam_y4t_1024_608_fp16.trt\n  use_cuda: true\n  input_width: 1024\n  input_height: 608\n  batch_size: 1\n  min_detection_size: 8  # Also see: face_min_size in face_mask_detector\n\ndebug:\n  output_detector_resolution: true\n  draw_detections: true\n  profiler: true\n"
  },
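  {
    "path": "yolo/example_load_config.py",
    "content": "\"\"\"\nMinimal usage sketch for yolo/config.py with config_images.yml (hypothetical\nexample file, not part of the original sources). Config wraps the parsed YAML\nand exposes its top-level sections through __getitem__, plus a color palette\nin (B, G, R) order suited for OpenCV drawing calls.\n\"\"\"\nfrom config import Config\n\nconfig = Config(\"config_images.yml\")\n\n# Read detector settings from the two YAML sections used by the detector\nthreshold = config[\"yolo_generic\"][\"detection_threshold\"]\nengine_file = config[\"yolo_trt_tiny\"][\"engine_file\"]\nprint(f\"Engine: {engine_file} | detection threshold: {threshold}\")\n\n# Colors are (B, G, R) tuples, e.g. config.colors[\"red\"] == (0, 0, 255)\nprint(config.colors[\"green\"])\n"
  },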
  {
    "path": "yolo/data/obj.data",
    "content": "classes = 4\ntrain = data/train.txt\nvalid = data/valid.txt\nnames = data/obj.names\nbackup = backup/\n"
  },
  {
    "path": "yolo/data/obj.names",
    "content": "mask\nno_mask\nnot_visible\nmisplaced\n"
  },
  {
    "path": "yolo/facemask-yolov4-tiny.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=16\n# Sizes: multiples of 32. Ratio (hd/4k videos): 1.78\n# width=1120\n# height=640\nwidth=1024\nheight=608\nchannels=3\nmomentum=0.9\ndecay=0.0005\nangle=10\nsaturation = 1.5\nexposure = 1.5\nhue=.2\n\nlearning_rate=0.00261\nburn_in=1000\npolicy=steps\n# max_batches = 8000\n# steps=6400,7200\nmax_batches = 12000\nsteps=9600,10800\nscales=.1,.1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers=-1\ngroups=2\ngroup_id=1\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1,-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -6,-1\n\n[maxpool]\nsize=2\nstride=2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n##################################\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=27\nactivation=linear\n\n\n\n[yolo]\nmask = 3,4,5\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=4\nnum=6\njitter=.3\nscale_x_y = 1.05\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\nresize=1.5\nnms_kind=greedynms\nbeta_nms=0.6\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = -1, 23\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=27\nactivation=linear\n\n[yolo]\nmask = 0,1,2\nanchors = 10,14,  23,27,  37,58,  81,82,  135,169,  344,319\nclasses=4\nnum=6\njitter=.3\nscale_x_y = 1.05\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nignore_thresh = .7\ntruth_thresh = 1\nrandom=0\nresize=1.5\nnms_kind=greedynms\nbeta_nms=0.6\n"
  },
  {
    "path": "yolo/facemask-yolov4.cfg",
    "content": "[net]\n# Testing\n#batch=1\n#subdivisions=1\n# Training\nbatch=64\nsubdivisions=32\nwidth=608\nheight=608\nchannels=3\nmomentum=0.949\ndecay=0.0005\nangle=0\nsaturation = 1.5\nexposure = 1.5\nhue=.1\n\nlearning_rate=0.001\nburn_in=1000\nmax_batches = 8000\npolicy=steps\nsteps=400000,450000\nscales=.1,.1\n\n#cutmix=1\nmosaic=1\n\n#:104x104 54:52x52 85:26x26 104:13x13 for 416\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=3\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=32\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-7\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=64\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-10\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-2\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -1,-28\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n# Downsample\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=3\nstride=2\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = -2\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=3\nstride=1\npad=1\nactivation=mish\n\n[shortcut]\nfrom=-3\nactivation=linear\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=mish\n\n[route]\nlayers = 
-1,-16\n\n[convolutional]\nbatch_normalize=1\nfilters=1024\nsize=1\nstride=1\npad=1\nactivation=mish\nstopbackward=800\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n### SPP ###\n[maxpool]\nstride=1\nsize=5\n\n[route]\nlayers=-2\n\n[maxpool]\nstride=1\nsize=9\n\n[route]\nlayers=-4\n\n[maxpool]\nstride=1\nsize=13\n\n[route]\nlayers=-1,-3,-5,-6\n### End SPP ###\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 85\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[upsample]\nstride=2\n\n[route]\nlayers = 54\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[route]\nlayers = -1, -3\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=128\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n##########################\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=256\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=27\nactivation=linear\n\n\n[yolo]\nmask = 0,1,2\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=4\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.2\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\nmax_delta=5\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=256\nactivation=leaky\n\n[route]\nlayers = -1, 
-16\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=256\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=512\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=27\nactivation=linear\n\n\n[yolo]\nmask = 3,4,5\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=4\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nscale_x_y = 1.1\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\nmax_delta=5\n\n\n[route]\nlayers = -4\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=2\npad=1\nfilters=512\nactivation=leaky\n\n[route]\nlayers = -1, -37\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nfilters=512\nsize=1\nstride=1\npad=1\nactivation=leaky\n\n[convolutional]\nbatch_normalize=1\nsize=3\nstride=1\npad=1\nfilters=1024\nactivation=leaky\n\n[convolutional]\nsize=1\nstride=1\npad=1\nfilters=27\nactivation=linear\n\n\n[yolo]\nmask = 6,7,8\nanchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401\nclasses=4\nnum=9\njitter=.3\nignore_thresh = .7\ntruth_thresh = 1\nrandom=1\nscale_x_y = 1.05\niou_thresh=0.213\ncls_normalizer=1.0\niou_normalizer=0.07\niou_loss=ciou\nnms_kind=greedynms\nbeta_nms=0.6\nmax_delta=5\n\n"
  },
  {
    "path": "yolo/integrations/yolo/detector_trt.py",
    "content": "import sys\nimport os\nimport time\nimport argparse\nimport numpy as np\nimport cv2\nimport tensorrt as trt\n\nimport pycuda.driver as cuda\nimport pycuda.autoinit\n\nfrom norfair.tracker import Detection\nfrom integrations.yolo.utils_pytorch import load_class_names, post_processing\n\n\"\"\"\nCode based on these implementations:\n - (main) https://github.com/NVIDIA/object-detection-tensorrt-example/blob/master/SSD_Model/utils/inference.py\n - (originally found here) https://github.com/Tianxiaomo/pytorch-YOLOv4/blob/master/demo_trt.py\n\"\"\"\n\n# Simple helper data class that's a little nicer to use than a 2-tuple.\nclass HostDeviceMem(object):\n    def __init__(self, host_mem, device_mem):\n        self.host = host_mem\n        self.device = device_mem\n\n    def __str__(self):\n        return \"Host:\\n\" + str(self.host) + \"\\nDevice:\\n\" + str(self.device)\n\n    def __repr__(self):\n        return self.__str__()\n\n\nclass DetectorYoloTRT:\n    \"\"\" Adaptor for the original Yolo implementation (AlexeyAB/darknet) \"\"\"\n\n    def __init__(self, config):\n        self.batch_size = config[\"batch_size\"]\n        self.input_h = config[\"input_height\"]\n        self.input_w = config[\"input_width\"]\n        self.detection_threshold = config[\"detection_threshold\"]\n        self.nms_threshold = config[\"nms_threshold\"]\n        self.engine_path = config[\"engine_file\"]\n        self.class_names = load_class_names(config[\"names_file\"])\n        if \"min_detection_size\" in config:\n            self.min_size = config[\"min_detection_size\"]\n        else:\n            self.min_size = 0\n\n        self.logger = trt.Logger()\n        self.runtime = trt.Runtime(self.logger)\n\n        print(\"Reading engine from file {}\".format(self.engine_path))\n        with open(self.engine_path, \"rb\") as f:\n            self.engine = self.runtime.deserialize_cuda_engine(f.read())\n\n        self.context = self.engine.create_execution_context()\n        self.buffers = self._allocate_buffers(self.engine)\n\n        self.context.set_binding_shape(\n            0, (self.batch_size, 3, self.input_h, self.input_w)\n        )\n        self.img_batch = np.zeros((self.batch_size, 3 * self.input_h * self.input_w))\n\n        self.timer_preprocess = 0.0\n        self.timer_inference = 0.0\n        self.timer_execute = 0.0\n        self.timer_postprocess = 0.0\n        self.n_frames = 0\n        self.n_inferences = 0\n\n    def print_profiler(self):\n        print(\n            f\"Batch size: {self.batch_size}\"\n            f\" | Frames processed: {self.n_frames}\"\n            f\" | # inferences executed: {self.n_inferences}\"\n        )\n        print(\n            f\"Avg preprocess time/frame:\\t{self.timer_preprocess/self.n_frames:.4f}s\"\n            f\"\\t| FPS: {self.n_frames / self.timer_preprocess:.1f}\"\n        )\n        print(\n            f\"Avg inference time/frame:\\t{self.timer_inference/self.n_frames:.4f}s\"\n            f\"\\t| FPS: {self.n_frames / self.timer_inference:.1f}\"\n        )\n        print(\n            f\"Avg postprocess time/frame:\\t{self.timer_postprocess/self.n_frames:.4f}s\"\n            f\"\\t| FPS: {self.n_frames / self.timer_postprocess:.1f}\"\n        )\n        print(\n            f\"Avg execute time/frame:\\t{self.timer_execute/self.n_frames:.4f}s\"\n            f\"\\t| FPS: {self.n_frames / self.timer_execute:.1f}\"\n        )\n        print(\n            f\"Avg execute time/inference:\\t{self.timer_execute/self.n_inferences:.4f}s\"\n        
)\n\n    def detect(self, frames, rescale_detections=True):\n        inputs, outputs, bindings, stream = self.buffers\n        frames_resized = []\n        self.n_frames += len(frames)\n        tick = time.time()\n        for idx, frame in enumerate(frames):\n            orig_height, orig_width = frame.shape[:2]\n\n            # Input\n            frame_resized = cv2.resize(\n                frame, (self.input_w, self.input_h), interpolation=cv2.INTER_LINEAR\n            )\n            frames_resized.append(frame_resized)\n            img_in = cv2.cvtColor(frame_resized, cv2.COLOR_BGR2RGB)\n            img_in = np.transpose(img_in, (2, 0, 1)).astype(np.float32)\n            # img_in = np.expand_dims(img_in, axis=0)\n            img_in /= 255.0\n            # img_in = np.ascontiguousarray(img_in)\n            self.img_batch[idx] = img_in.ravel()\n        np.copyto(inputs[0].host, self.img_batch.ravel())\n        self.timer_preprocess += time.time() - tick\n        tick = time.time()\n\n        trt_outputs = self._do_inference(\n            self.context,\n            bindings=bindings,\n            inputs=inputs,\n            outputs=outputs,\n            stream=stream,\n        )\n        self.timer_inference += time.time() - tick\n\n        trt_outputs[0] = trt_outputs[0].reshape(self.batch_size, -1, 1, 4)\n        trt_outputs[1] = trt_outputs[1].reshape(\n            self.batch_size, -1, len(self.class_names)\n        )\n\n        # detection threshold + NMS filtering\n        tick = time.time()\n        detections = post_processing(\n            img_in, self.detection_threshold, self.nms_threshold, trt_outputs\n        )\n        self.timer_postprocess += time.time() - tick\n        dets_batches = []\n\n        for batch_idx in range(len(detections)):\n            width = orig_width if rescale_detections else self.input_w\n            height = orig_height if rescale_detections else self.input_h\n            dets = []\n            for k, d in enumerate(detections[batch_idx]):\n                d[0] *= width\n                d[1] *= height\n                d[2] *= width\n                d[3] *= height\n                if self.min_size:\n                    detection_width = d[2] - d[0]\n                    detection_height = d[3] - d[1]\n                    if min(detection_height, detection_width) < self.min_size:\n                        continue  # Skip this detection, keep processing the rest\n                p = d[4]\n                label = self.class_names[d[6]]\n                dets.append(\n                    Detection(\n                        np.array((d[0:2], d[2:4])),\n                        data={\"label\": label, \"p\": p},\n                    )\n                )\n            dets_batches.append(dets)\n        return dets_batches, frames_resized\n\n    # This function is generalized for multiple inputs/outputs.\n    # inputs and outputs are expected to be lists of HostDeviceMem objects.\n    def _do_inference(self, context, bindings, inputs, outputs, stream):\n        self.n_inferences += 1\n        tick = time.time()\n        # Transfer input data to the GPU.\n        [cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]\n        # Run inference.\n        context.execute_async_v2(bindings=bindings, stream_handle=stream.handle)\n        # Transfer predictions back from the GPU.\n        [cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]\n        # Synchronize the stream\n        stream.synchronize()\n        # In this case, we measure the memcpy + execute ops together (small 
\n        self.timer_execute += time.time() - tick\n        # Return only the host outputs.\n        return [out.host for out in outputs]\n\n    def _do_inference_sync(self, context, bindings, inputs, outputs, stream):\n        self.n_inferences += 1\n        # Transfer input data to the GPU.\n        [cuda.memcpy_htod(inp.device, inp.host) for inp in inputs]\n        # Run inference.\n        tick = time.time()\n        context.execute_v2(bindings=bindings)\n        self.timer_execute += time.time() - tick\n        # Transfer predictions back from the GPU.\n        [cuda.memcpy_dtoh(out.host, out.device) for out in outputs]\n        # Return only the host outputs.\n        return [out.host for out in outputs]\n\n    # Allocates all buffers required for an engine, i.e. host/device inputs/outputs.\n    def _allocate_buffers(self, engine):\n        inputs = []\n        outputs = []\n        bindings = []\n        stream = cuda.Stream()\n        for binding in engine:\n\n            # engine.max_batch_size is 1 for static batch\n            size = (\n                trt.volume(engine.get_binding_shape(binding))\n                * self.engine.max_batch_size\n            )\n            dims = engine.get_binding_shape(binding)\n\n            # in case batch dimension is -1 (dynamic)\n            if dims[0] < 0:\n                size *= -1\n\n            dtype = trt.nptype(engine.get_binding_dtype(binding))\n            # Allocate host and device buffers\n            host_mem = cuda.pagelocked_empty(size, dtype)\n            device_mem = cuda.mem_alloc(host_mem.nbytes)\n            # Append the device buffer to device bindings.\n            bindings.append(int(device_mem))\n            # Append to the appropriate list.\n            if engine.binding_is_input(binding):\n                inputs.append(HostDeviceMem(host_mem, device_mem))\n            else:\n                outputs.append(HostDeviceMem(host_mem, device_mem))\n        return inputs, outputs, bindings, stream\n
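\n\nif __name__ == \"__main__\":\n    # Minimal smoke-test sketch (not part of the original pipeline). The config\n    # keys are exactly the ones read in __init__ above, but every value and\n    # path here is a hypothetical placeholder; in normal use they come from\n    # config_images.yml. Running this requires a serialized TensorRT engine\n    # and a class-names file.\n    config = {\n        \"batch_size\": 1,\n        \"input_height\": 416,  # placeholder: must match the engine input\n        \"input_width\": 416,  # placeholder: must match the engine input\n        \"detection_threshold\": 0.3,\n        \"nms_threshold\": 0.5,\n        \"engine_file\": \"facemask.engine\",  # placeholder path\n        \"names_file\": \"facemask.names\",  # placeholder path\n    }\n    detector = DetectorYoloTRT(config)\n    frame = cv2.imread(sys.argv[1])  # path to any test image\n    dets_batches, _ = detector.detect([frame])\n    for det in dets_batches[0]:\n        print(det.data[\"label\"], det.data[\"p\"])\n    detector.print_profiler()\n"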
  },
  {
    "path": "yolo/integrations/yolo/utils_pytorch.py",
    "content": "import numpy as np\n\n\n\"\"\"\nFunctions copied from pytorch-YOLOv4/tool/utils.py with all calls to print() removed\n\"\"\"\n\n\ndef nms_cpu(boxes, confs, nms_thresh=0.5, min_mode=False):\n    # print(boxes.shape)\n    x1 = boxes[:, 0]\n    y1 = boxes[:, 1]\n    x2 = boxes[:, 2]\n    y2 = boxes[:, 3]\n\n    areas = (x2 - x1) * (y2 - y1)\n    order = confs.argsort()[::-1]\n\n    keep = []\n    while order.size > 0:\n        idx_self = order[0]\n        idx_other = order[1:]\n\n        keep.append(idx_self)\n\n        xx1 = np.maximum(x1[idx_self], x1[idx_other])\n        yy1 = np.maximum(y1[idx_self], y1[idx_other])\n        xx2 = np.minimum(x2[idx_self], x2[idx_other])\n        yy2 = np.minimum(y2[idx_self], y2[idx_other])\n\n        w = np.maximum(0.0, xx2 - xx1)\n        h = np.maximum(0.0, yy2 - yy1)\n        inter = w * h\n\n        if min_mode:\n            over = inter / np.minimum(areas[order[0]], areas[order[1:]])\n        else:\n            over = inter / (areas[order[0]] + areas[order[1:]] - inter)\n\n        inds = np.where(over <= nms_thresh)[0]\n        order = order[inds + 1]\n\n    return np.array(keep)\n\n\ndef load_class_names(namesfile):\n    class_names = []\n    with open(namesfile, \"r\") as fp:\n        lines = fp.readlines()\n    for line in lines:\n        line = line.rstrip()\n        class_names.append(line)\n    return class_names\n\n\ndef post_processing(img, conf_thresh, nms_thresh, output):\n\n    # anchors = [12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401]\n    # num_anchors = 9\n    # anchor_masks = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]\n    # strides = [8, 16, 32]\n    # anchor_step = len(anchors) // num_anchors\n\n    # [batch, num, 1, 4]\n    box_array = output[0]\n    # [batch, num, num_classes]\n    confs = output[1]\n\n    if type(box_array).__name__ != \"ndarray\":\n        box_array = box_array.cpu().detach().numpy()\n        confs = confs.cpu().detach().numpy()\n\n    num_classes = confs.shape[2]\n\n    # [batch, num, 4]\n    box_array = box_array[:, :, 0]\n\n    # [batch, num, num_classes] --> [batch, num]\n    max_conf = np.max(confs, axis=2)\n    max_id = np.argmax(confs, axis=2)\n\n    bboxes_batch = []\n    for i in range(box_array.shape[0]):\n\n        argwhere = max_conf[i] > conf_thresh\n        l_box_array = box_array[i, argwhere, :]\n        l_max_conf = max_conf[i, argwhere]\n        l_max_id = max_id[i, argwhere]\n\n        bboxes = []\n        # nms for each class\n        for j in range(num_classes):\n\n            cls_argwhere = l_max_id == j\n            ll_box_array = l_box_array[cls_argwhere, :]\n            ll_max_conf = l_max_conf[cls_argwhere]\n            ll_max_id = l_max_id[cls_argwhere]\n\n            keep = nms_cpu(ll_box_array, ll_max_conf, nms_thresh)\n\n            if keep.size > 0:\n                ll_box_array = ll_box_array[keep, :]\n                ll_max_conf = ll_max_conf[keep]\n                ll_max_id = ll_max_id[keep]\n\n                for k in range(ll_box_array.shape[0]):\n                    bboxes.append(\n                        [\n                            ll_box_array[k, 0],\n                            ll_box_array[k, 1],\n                            ll_box_array[k, 2],\n                            ll_box_array[k, 3],\n                            ll_max_conf[k],\n                            ll_max_conf[k],\n                            ll_max_id[k],\n                        ]\n                    )\n\n        bboxes_batch.append(bboxes)\n\n    return 
bboxes_batch\n
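\n\nif __name__ == \"__main__\":\n    # Tiny self-check for nms_cpu (illustrative values, not from the original\n    # module): two heavily overlapping boxes plus one disjoint box. With the\n    # default IoU threshold of 0.5, the lower-confidence overlapping box is\n    # suppressed, keeping indices 0 and 2.\n    boxes = np.array(\n        [[0.0, 0.0, 1.0, 1.0], [0.05, 0.05, 1.0, 1.0], [2.0, 2.0, 3.0, 3.0]]\n    )\n    confs = np.array([0.9, 0.8, 0.7])\n    print(nms_cpu(boxes, confs, nms_thresh=0.5))  # expected: [0 2]\n"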
  },
  {
    "path": "yolo/integrations/yolo/yolo_adaptor.py",
    "content": "import cv2\nimport numpy as np\n\nfrom norfair.drawing import Color\n\n\nclass YoloAdaptor:\n    def __init__(self, config):\n        self.detection_threshold = config[\"detection_threshold\"]\n        self.distance_threshold = config[\"distance_threshold\"]\n\n    def classify_people(self, tracked_people):\n        p_masks = []\n        for d in tracked_people:\n            meta = d.last_detection.data\n            if meta[\"label\"] == \"mask\":\n                p_mask = float(meta[\"p\"])\n            elif meta[\"label\"] == \"no_mask\" or meta[\"label\"] == \"misplaced\":\n                p_mask = 1 - float(meta[\"p\"])\n            elif meta[\"label\"] == \"not_visible\":\n                p_mask = 0.5\n            else:\n                raise  # Unknown label\n            p_masks.append(p_mask)\n        return p_masks\n\n    def keypoints_distance(self, detected_pose, tracked_pose):\n        detected_points = detected_pose.points\n        estimated_pose = tracked_pose.estimate\n        min_box_size = min(\n            max(\n                detected_points[1][0] - detected_points[0][0],  # x2 - x1\n                detected_points[1][1] - detected_points[0][1],  # y2 - y1\n                1,\n            ),\n            max(\n                estimated_pose[1][0] - estimated_pose[0][0],  # x2 - x1\n                estimated_pose[1][1] - estimated_pose[0][1],  # y2 - y1\n                1,\n            ),\n        )\n        mean_distance_normalized = (\n            np.mean(np.linalg.norm(detected_points - estimated_pose, axis=1)) / min_box_size\n        )\n        return mean_distance_normalized\n\n    def person_has_face(self, person):\n        return person.last_detection.data[\"label\"] != \"not_visible\"\n\n    def get_person_head(self, person):\n        if person.live_points.sum() < 2:\n            return None\n        p1, p2 = person.estimate.astype(int)\n        return (tuple(p1), tuple(p2))\n\n    def draw_raw_detections(self, frame, detections):\n        for d in detections:\n            p1, p2 = d.points.astype(int)\n            bbox = (tuple(p1), tuple(p2))\n            label = d.data[\"label\"]\n            p = float(d.data[\"p\"])\n            color = (\n                Color.green\n                if label == \"mask\"\n                else (\n                    Color.red\n                    if label == \"no_mask\"\n                    else (Color.yellow if label == \"misplaced\" else Color.white)\n                )\n            )\n            cv2.rectangle(frame, bbox[0], bbox[1], color, 1)\n            cv2.putText(\n                frame,\n                f\"{label}: {p:.2f}\",\n                (bbox[0][0], bbox[0][1] - 5),\n                cv2.FONT_HERSHEY_SIMPLEX,\n                0.5,\n                color,\n                1,\n                cv2.LINE_AA,\n            )\n            # # Draw debugging info\n            # cv2.putText(\n            #     frame,\n            #     f\"width: {bbox[1][0] - bbox[0][0]}\",\n            #     (bbox[1][0], bbox[1][1] + 10),\n            #     cv2.FONT_HERSHEY_SIMPLEX,\n            #     0.5,\n            #     color,\n            #     1,\n            #     cv2.LINE_AA,\n            # )\n\n\n"
  },
  {
    "path": "yolo/run_yolo_images.py",
    "content": "################################################################################\n# Copyright (c) 2020-2021, Berkeley Design Technology, Inc. All rights reserved.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n################################################################################\n\n# %%\nimport os\nimport cv2\nimport sys\nimport yaml\nimport time\nimport glob\n\nfrom integrations.yolo.yolo_adaptor import YoloAdaptor\n\n# Requires python tensorrt, usually compiled for python 3.6 at system level\nfrom integrations.yolo.detector_trt import DetectorYoloTRT\n\n# %%\nimages_folder = sys.argv[1]\noutput_folder = sys.argv[2]\n\nprint(f\"Scanning input directory: {images_folder}\")\nimages = []\nfor filetype in [\"png\", \"jpg\", \"jpeg\"]:  # No uppercase for now\n    images += glob.glob(f\"{images_folder}/*.{filetype}\")\nprint(f\"Found {len(images)} images\")\n\nif input(f\"Confirm output to [{output_folder}] [y/n]\").strip() != \"y\":\n    print(\"Not confirmed. Exiting\")\n    exit(0)\n\nos.system(f\"mkdir -p {output_folder}\")\n\n# %%\nwith open(\"config_images.yml\", \"r\") as stream:\n    # Not using Loader=yaml.FullLoader since it doesn't work on jetson PyYAML version\n    config = yaml.load(stream)\n\nyolo_config = {**config[\"yolo_trt_tiny\"], **config[\"yolo_generic\"]}\n\ndetector = DetectorYoloTRT(yolo_config)\n\n# Converter functions from Yolo -> Tracker + FaceMaskDetector\npose_adaptor = YoloAdaptor(config[\"yolo_generic\"])\n\ndetector_output = config[\"debug\"][\"output_detector_resolution\"]\n\nfor k, image_filename in enumerate(images):\n\n    frame = cv2.imread(image_filename)\n    if (\n        detector_output\n    ):  # Only for debugging purposes: use resized frame in video output\n        detections, frames_resized = detector.detect([frame], rescale_detections=False)\n        frame = frames_resized[0]\n    else:\n        detections, _ = detector.detect([frame], rescale_detections=True)\n    detections = detections[0]  # Remove batch dimension\n\n    # Drawing functions\n    if config[\"debug\"][\"draw_detections\"]:  # Raw yolo detections\n        pose_adaptor.draw_raw_detections(frame, detections)\n\n    im_basename = image_filename.split(\"/\")[-1]\n    image_outfile = f\"{output_folder}/{im_basename}\"\n    cv2.imwrite(image_outfile, frame)\n    print(f\"Writing [{k}/{len(images)}]: {image_outfile}\")\n\nif config[\"debug\"][\"profiler\"]:\n    detector.print_profiler()\n"
  },
  {
    "path": "yolo/train_cu90.sh",
    "content": "mkdir -p backup\nLD_LIBRARY_PATH=/usr/local/cuda-9.0/lib64/ ./darknet detector train data/obj.data facemask-yolov4-tiny.cfg yolov4-tiny.conv.29 -dont_show -mjpeg_port 8090 -map\n"
  }
]